commit 42172cbb6fd8ffe0a7851c46570a924d4919027a Author: Chris Date: Fri Oct 24 11:42:14 2025 +0200 init diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..eb828e0 --- /dev/null +++ b/.env.example @@ -0,0 +1,30 @@ +# Distributed Llama Docker Environment Configuration +# Copy this file to .env and customize as needed + +# Model configuration +MODEL_NAME=llama3_2_3b_instruct_q40 +MAX_SEQ_LEN=4096 +BUFFER_FLOAT_TYPE=q80 + +# Thread configuration +CONTROLLER_NTHREADS=4 +WORKER_NTHREADS=4 + +# To use a different model, change MODEL_NAME to one of: +# - llama3_1_8b_instruct_q40 +# - llama3_1_405b_instruct_q40 +# - llama3_2_1b_instruct_q40 +# - llama3_2_3b_instruct_q40 +# - llama3_3_70b_instruct_q40 +# - deepseek_r1_distill_llama_8b_q40 +# - qwen3_0.6b_q40 +# - qwen3_1.7b_q40 +# - qwen3_8b_q40 +# - qwen3_14b_q40 +# - qwen3_30b_a3b_q40 + +# Performance tuning: +# - Adjust CONTROLLER_NTHREADS and WORKER_NTHREADS based on your Pi's CPU cores +# - For Pi 4 (4 cores): use 4 threads +# - For Pi 3 (4 cores): use 2-4 threads +# - For Pi Zero 2 (4 cores): use 2 threads \ No newline at end of file diff --git a/.github/8raspi.jpg b/.github/8raspi.jpg new file mode 100644 index 0000000..44f02b0 Binary files /dev/null and b/.github/8raspi.jpg differ diff --git a/.github/8raspi2.jpg b/.github/8raspi2.jpg new file mode 100644 index 0000000..e4f24c6 Binary files /dev/null and b/.github/8raspi2.jpg differ diff --git a/.github/cover.png b/.github/cover.png new file mode 100644 index 0000000..57d1ef7 Binary files /dev/null and b/.github/cover.png differ diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..f0e32c1 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,63 @@ +name: main +on: + pull_request: + branches: + - main + - feat/nn + push: + branches: + - main + - feat/nn +jobs: + build-linux: + name: Linux + runs-on: ${{matrix.runner}} + strategy: + matrix: + include: + - runner: ubuntu-22.04 + arch: amd64 + - runner: ubuntu-24.04-arm + arch: arm64 + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + - name: Dependencies + id: dependencies + run: sudo apt-get update && sudo apt-get install build-essential + - name: Build + id: build + run: | + make dllama + make nn-cpu-test + make nn-cpu-ops-test + make tokenizer-test + - name: nn-cpu-test + run: ./nn-cpu-test + - name: nn-cpu-ops-test + run: ./nn-cpu-ops-test + - name: tokenizer-test + run: ./tokenizer-test + + build-windows: + name: Windows + runs-on: windows-latest + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + - name: Dependencies + id: dependencies + run: choco install make + - name: Build + id: build + run: | + make dllama + make nn-cpu-test + make nn-cpu-ops-test + make tokenizer-test + - name: nn-cpu-test + run: ./nn-cpu-test + - name: nn-cpu-ops-test + run: ./nn-cpu-ops-test + - name: tokenizer-test + run: ./tokenizer-test diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f17b4ad --- /dev/null +++ b/.gitignore @@ -0,0 +1,19 @@ +.vscode/settings.json + +*.o +*.0 +*.dSYM +*.data +*.temp +*.tmp +__pycache__ + +*-test +/models +main +run*.sh +server +/dllama +/dllama-* +*.exe +*.spv \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..0710ff0 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,17 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "main", + "type": "cppdbg", + "request": "launch", + "program": "${workspaceFolder}/main", + "args": [], 
+ "stopAtEntry": false, + "cwd": "${workspaceFolder}", + "environment": [], + "externalConsole": false, + "MIMode": "lldb" + } + ] +} diff --git a/DOCKER_README.md b/DOCKER_README.md new file mode 100644 index 0000000..744599f --- /dev/null +++ b/DOCKER_README.md @@ -0,0 +1,202 @@ +# Distributed Llama Docker Setup for Raspberry Pi + +This directory contains Docker configurations to run Distributed Llama on Raspberry Pi devices using containers. There are two variants: + +1. **Controller** (`Dockerfile.controller`) - Downloads models and runs the API server +2. **Worker** (`Dockerfile.worker`) - Runs worker nodes that connect to the controller + +## Quick Start with Docker Compose + +### 1. Download a Model + +First, download a model using the controller container: + +```bash +# Create a models directory +mkdir -p models + +# Download a model (this will take some time) +docker-compose run --rm controller --download llama3_2_3b_instruct_q40 +``` + +### 2. Start the Distributed Setup + +```bash +# Start all services (1 controller + 3 workers) +docker-compose up +``` + +The API will be available at `http://localhost:9999` + +### 3. Test the API + +```bash +# List available models +curl http://localhost:9999/v1/models + +# Send a chat completion request +curl -X POST http://localhost:9999/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "llama", + "messages": [{"role": "user", "content": "Hello, how are you?"}], + "max_tokens": 100 + }' +``` + +## Manual Docker Usage + +### Building the Images + +```bash +# Build controller image +docker build -f Dockerfile.controller -t distributed-llama-controller . + +# Build worker image +docker build -f Dockerfile.worker -t distributed-llama-worker . +``` + +### Running the Controller + +```bash +# Download a model first +docker run -v ./models:/app/models distributed-llama-controller --download llama3_2_3b_instruct_q40 + +# Run API server (standalone mode, no workers) +docker run -p 9999:9999 -v ./models:/app/models distributed-llama-controller \ + --model llama3_2_3b_instruct_q40 + +# Run API server with workers +docker run -p 9999:9999 -v ./models:/app/models distributed-llama-controller \ + --model llama3_2_3b_instruct_q40 \ + --workers 10.0.0.2:9999 10.0.0.3:9999 10.0.0.4:9999 +``` + +### Running Workers + +```bash +# Run a worker on default port 9999 +docker run -p 9999:9999 distributed-llama-worker + +# Run a worker with custom settings +docker run -p 9998:9998 distributed-llama-worker --port 9998 --nthreads 2 +``` + +## Available Models + +You can download any of these models: + +- `llama3_1_8b_instruct_q40` +- `llama3_1_405b_instruct_q40` (very large, 56 parts) +- `llama3_2_1b_instruct_q40` +- `llama3_2_3b_instruct_q40` +- `llama3_3_70b_instruct_q40` +- `deepseek_r1_distill_llama_8b_q40` +- `qwen3_0.6b_q40` +- `qwen3_1.7b_q40` +- `qwen3_8b_q40` +- `qwen3_14b_q40` +- `qwen3_30b_a3b_q40` + +## Configuration Options + +### Controller Options + +- `--model `: Model name to use (required) +- `--port `: API server port (default: 9999) +- `--nthreads `: Number of threads (default: 4) +- `--max-seq-len `: Maximum sequence length (default: 4096) +- `--buffer-float-type `: Buffer float type (default: q80) +- `--workers `: Space-separated worker addresses +- `--download `: Download a model and exit + +### Worker Options + +- `--port `: Worker port (default: 9999) +- `--nthreads `: Number of threads (default: 4) + +## Environment Variables (Docker Compose) + +You can customize the setup using environment variables: + +```bash 
+# Set model and thread counts +MODEL_NAME=llama3_2_1b_instruct_q40 \ +CONTROLLER_NTHREADS=2 \ +WORKER_NTHREADS=2 \ +docker-compose up +``` + +Available variables: +- `MODEL_NAME`: Model to use (default: llama3_2_3b_instruct_q40) +- `CONTROLLER_NTHREADS`: Controller threads (default: 4) +- `WORKER_NTHREADS`: Worker threads (default: 4) +- `MAX_SEQ_LEN`: Maximum sequence length (default: 4096) +- `BUFFER_FLOAT_TYPE`: Buffer float type (default: q80) + +## Multi-Device Setup + +To run across multiple Raspberry Pi devices: + +### Device 1 (Controller) +```bash +# Run controller +docker run -p 9999:9999 -v ./models:/app/models distributed-llama-controller \ + --model llama3_2_3b_instruct_q40 \ + --workers 10.0.0.2:9999 10.0.0.3:9999 10.0.0.4:9999 +``` + +### Devices 2-4 (Workers) +```bash +# Run worker on each device +docker run -p 9999:9999 distributed-llama-worker --nthreads 4 +``` + +## Performance Tips + +1. **Thread Count**: Set `--nthreads` to the number of CPU cores on each device +2. **Memory**: Larger models require more RAM. Monitor usage with `docker stats` +3. **Network**: Use wired Ethernet connections for better performance between devices +4. **Storage**: Use fast SD cards (Class 10 or better) or USB 3.0 storage for model files + +## Troubleshooting + +### Model Download Issues +```bash +# Check if model files exist +ls -la models/llama3_2_3b_instruct_q40/ + +# Re-download if corrupted +docker-compose run --rm controller --download llama3_2_3b_instruct_q40 +``` + +### Worker Connection Issues +```bash +# Check worker logs +docker-compose logs worker1 + +# Test network connectivity +docker exec -it ping 172.20.0.11 +``` + +### Resource Issues +```bash +# Monitor resource usage +docker stats + +# Reduce thread count if CPU usage is too high +CONTROLLER_NTHREADS=2 WORKER_NTHREADS=2 docker-compose up +``` + +## Web Interface + +You can use the web chat interface at [llama-ui.js.org](https://llama-ui.js.org/): + +1. Open the website +2. Go to settings +3. Set base URL to: `http://your-pi-ip:9999` +4. Save and start chatting + +## License + +This Docker setup follows the same license as the main Distributed Llama project. 
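
## Scripted API Check

As a scriptable alternative to the `curl` test shown earlier, here is a minimal Python sketch (standard library only) that sends the same chat completion request; it assumes the compose stack above is running on `localhost:9999` and that the response follows the OpenAI-style `choices[0].message` shape:

```python
import json
import urllib.request

# Same payload as the curl example in "Test the API" above.
payload = {
    "model": "llama",
    "messages": [{"role": "user", "content": "Hello, how are you?"}],
    "max_tokens": 100,
}
request = urllib.request.Request(
    "http://localhost:9999/v1/chat/completions",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(request) as response:
    body = json.load(response)

# Print the assistant's reply (assumes an OpenAI-compatible response shape).
print(body["choices"][0]["message"]["content"])
```
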
\ No newline at end of file diff --git a/Dockerfile.controller b/Dockerfile.controller new file mode 100644 index 0000000..070366a --- /dev/null +++ b/Dockerfile.controller @@ -0,0 +1,160 @@ +# Dockerfile for Distributed Llama Controller (Raspberry Pi) +# This variant can download models and start the API server +FROM arm64v8/debian:bookworm-slim + +# Install dependencies +RUN apt-get update && apt-get install -y \ + build-essential \ + g++ \ + make \ + git \ + python3 \ + python3-pip \ + curl \ + wget \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +# Set working directory +WORKDIR /app + +# Copy source code +COPY src/ ./src/ +COPY Makefile ./ +COPY launch.py ./ + +# Build the applications +RUN make dllama && make dllama-api + +# Create models directory for volume mount +RUN mkdir -p /app/models + +# Create a script to download models +COPY <" + echo "Available models:" + python3 launch.py + exit 1 +fi + +python3 launch.py "\$1" -skip-run -skip-script -y +EOF + +RUN chmod +x /app/download-model.sh + +# Create entrypoint script +COPY < Download a model and exit" + echo " --model Model name to use" + echo " --port API server port (default: 9999)" + echo " --nthreads Number of threads (default: 4)" + echo " --max-seq-len Maximum sequence length (default: 4096)" + echo " --buffer-float-type Buffer float type (default: q80)" + echo " --workers Space-separated list of worker addresses (e.g., 10.0.0.2:9999 10.0.0.3:9999)" + echo "" + echo "Examples:" + echo " # Download a model" + echo " docker run -v ./models:/app/models distributed-llama-controller --download llama3_2_3b_instruct_q40" + echo "" + echo " # Run API server with workers" + echo " docker run -p 9999:9999 -v ./models:/app/models distributed-llama-controller \\" + echo " --model llama3_2_3b_instruct_q40 --workers 10.0.0.2:9999 10.0.0.3:9999" + exit 0 + ;; + *) + echo "Unknown option: \$1" + exit 1 + ;; + esac +done + +if [ -z "\$MODEL_NAME" ]; then + echo "Error: --model is required" + echo "Use --help for usage information" + exit 1 +fi + +MODEL_PATH="/app/models/\$MODEL_NAME/dllama_model_\$MODEL_NAME.m" +TOKENIZER_PATH="/app/models/\$MODEL_NAME/dllama_tokenizer_\$MODEL_NAME.t" + +if [ ! -f "\$MODEL_PATH" ] || [ ! -f "\$TOKENIZER_PATH" ]; then + echo "Error: Model files not found for \$MODEL_NAME" + echo "Model path: \$MODEL_PATH" + echo "Tokenizer path: \$TOKENIZER_PATH" + echo "" + echo "Please download the model first:" + echo "docker run -v ./models:/app/models distributed-llama-controller --download \$MODEL_NAME" + exit 1 +fi + +# Build the command +CMD="./dllama-api --port \$API_PORT --model \$MODEL_PATH --tokenizer \$TOKENIZER_PATH --buffer-float-type \$BUFFER_FLOAT_TYPE --nthreads \$NTHREADS --max-seq-len \$MAX_SEQ_LEN" + +if [ ! 
-z "\$WORKERS" ]; then + CMD="\$CMD --workers \$WORKERS" +fi + +echo "Starting API server with command:" +echo "\$CMD" +echo "" + +exec \$CMD +EOF + +RUN chmod +x /app/entrypoint.sh + +# Expose the default API port +EXPOSE 9999 + +# Use the entrypoint script +ENTRYPOINT ["/app/entrypoint.sh"] \ No newline at end of file diff --git a/Dockerfile.worker b/Dockerfile.worker new file mode 100644 index 0000000..782e76e --- /dev/null +++ b/Dockerfile.worker @@ -0,0 +1,75 @@ +# Dockerfile for Distributed Llama Worker (Raspberry Pi) +# This variant runs as a worker node and connects to a controller +FROM arm64v8/debian:bookworm-slim + +# Install dependencies +RUN apt-get update && apt-get install -y \ + build-essential \ + g++ \ + make \ + && rm -rf /var/lib/apt/lists/* + +# Set working directory +WORKDIR /app + +# Copy source code +COPY src/ ./src/ +COPY Makefile ./ + +# Build only the worker application +RUN make dllama + +# Create entrypoint script +COPY < Worker port (default: 9999)" + echo " --nthreads Number of threads (default: 4)" + echo "" + echo "Example:" + echo " docker run -p 9999:9999 distributed-llama-worker --port 9999 --nthreads 4" + exit 0 + ;; + *) + echo "Unknown option: \$1" + exit 1 + ;; + esac +done + +# Build the command +CMD="./dllama worker --port \$PORT --nthreads \$NTHREADS" + +echo "Starting worker with command:" +echo "\$CMD" +echo "" + +exec \$CMD +EOF + +RUN chmod +x /app/entrypoint.sh + +# Expose the default worker port +EXPOSE 9999 + +# Use the entrypoint script +ENTRYPOINT ["/app/entrypoint.sh"] \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..bca80dd --- /dev/null +++ b/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2024 Bartล‚omiej Tadych (b4rtaz) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..f16c12c --- /dev/null +++ b/Makefile @@ -0,0 +1,90 @@ +CXX = g++ +CXXFLAGS = -std=c++11 -Werror -Wformat -Werror=format-security + +ifndef TERMUX_VERSION + CXXFLAGS += -march=native -mtune=native +endif + +ifdef DEBUG + CXXFLAGS += -g -fsanitize=address +else + CXXFLAGS += -O3 +endif + +ifdef WVLA + CXXFLAGS += -Wvla-extension +endif + +ifdef DLLAMA_VULKAN + CGLSLC = glslc + +ifeq ($(OS),Windows_NT) + LIBS += -L$(VK_SDK_PATH)\lib -lvulkan-1 + CXXFLAGS += -DDLLAMA_VULKAN -I$(VK_SDK_PATH)\include +else + LIBS += -lvulkan + CXXFLAGS += -DDLLAMA_VULKAN +endif + + DEPS += nn-vulkan.o +endif + +ifeq ($(OS),Windows_NT) + LIBS += -lws2_32 + DELETE_CMD = del /f +else + LIBS += -lpthread + DELETE_CMD = rm -fv +endif + +.PHONY: clean dllama + +clean: + $(DELETE_CMD) *.o dllama dllama-* socket-benchmark mmap-buffer-* *-test *.exe + +# nn +nn-quants.o: src/nn/nn-quants.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +nn-core.o: src/nn/nn-core.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +nn-executor.o: src/nn/nn-executor.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +nn-network.o: src/nn/nn-network.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +llamafile-sgemm.o: src/nn/llamafile/sgemm.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +nn-cpu-ops.o: src/nn/nn-cpu-ops.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +nn-cpu.o: src/nn/nn-cpu.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +nn-cpu-test: src/nn/nn-cpu-test.cpp nn-quants.o nn-core.o nn-executor.o llamafile-sgemm.o nn-cpu-ops.o nn-cpu.o + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) +nn-cpu-ops-test: src/nn/nn-cpu-ops-test.cpp nn-quants.o nn-core.o nn-executor.o llamafile-sgemm.o nn-cpu.o + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) +nn-vulkan.o: src/nn/nn-vulkan.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ + +ifdef DLLAMA_VULKAN +VULKAN_SHADER_SRCS := $(wildcard src/nn/vulkan/*.comp) +VULKAN_SHADER_BINS := $(VULKAN_SHADER_SRCS:.comp=.spv) +DEPS += $(VULKAN_SHADER_BINS) + +%.spv: %.comp + $(CGLSLC) -c $< -o $@ --target-env=vulkan1.2 +nn-vulkan-test: src/nn/nn-vulkan-test.cpp nn-quants.o nn-core.o nn-executor.o nn-vulkan.o ${DEPS} + $(CXX) $(CXXFLAGS) $(filter-out %.spv, $^) -o $@ $(LIBS) +endif + +# llm +tokenizer.o: src/tokenizer.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +llm.o: src/llm.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +app.o: src/app.cpp + $(CXX) $(CXXFLAGS) -c $^ -o $@ +tokenizer-test: src/tokenizer-test.cpp nn-quants.o nn-core.o llamafile-sgemm.o nn-cpu-ops.o tokenizer.o + $(CXX) $(CXXFLAGS) $^ -o $@ $(LIBS) +dllama: src/dllama.cpp nn-quants.o nn-core.o nn-executor.o nn-network.o llamafile-sgemm.o nn-cpu-ops.o nn-cpu.o tokenizer.o llm.o app.o ${DEPS} + $(CXX) $(CXXFLAGS) $(filter-out %.spv, $^) -o $@ $(LIBS) +dllama-api: src/dllama-api.cpp nn-quants.o nn-core.o nn-executor.o nn-network.o llamafile-sgemm.o nn-cpu-ops.o nn-cpu.o tokenizer.o llm.o app.o ${DEPS} + $(CXX) $(CXXFLAGS) $(filter-out %.spv, $^) -o $@ $(LIBS) diff --git a/README.md b/README.md new file mode 100644 index 0000000..960e180 --- /dev/null +++ b/README.md @@ -0,0 +1,142 @@ +![Distributed Llama](.github/cover.png) + +# Distributed Llama + +[![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/b4rtaz/distributed-llama/.github%2Fworkflows%2Fmain.yml?style=flat-square)](https://github.com/b4rtaz/distributed-llama/actions) [![License: MIT](https://img.shields.io/github/license/mashape/apistatus.svg?style=flat-square)](/LICENSE) 
[![Discord](https://discordapp.com/api/guilds/1245814812353495070/widget.png?style=shield)](https://n4no.com/projects/distributedLlama/discord.php) + +Connect home devices into a powerful cluster to accelerate LLM inference. More devices mean faster performance, leveraging tensor parallelism and high-speed synchronization over Ethernet. + +Supports Linux, macOS, and Windows. Optimized for ARM and x86_64 AVX2 CPUs. + +**How to Run** +- [๐Ÿ’ป How to Run on Linux, MacOS or Windows](./docs/HOW_TO_RUN_LINUX_MACOS_WIN.md) +- [๐Ÿ“ How to Run on Raspberry Pi](./docs/HOW_TO_RUN_RASPBERRYPI.md) +- [๐Ÿง  How to Run on GPU](./docs/HOW_TO_RUN_GPU.md) + +**News** +- 16 Sep 2025 - Qwen 3 MoE models are now supported on Vulkan. +- 5 Sep 2025 - Qwen 3 MoE models are now supported on CPU. +- 3 Aug 2025 - Qwen 3 0.6B, 1.7B, 8B and 14B models are now supported. +- 23 Mar 2025 - [๐ŸŒ‹ Experimental Vulkan support](https://github.com/b4rtaz/distributed-llama/releases/tag/v0.13.0) +- 12 Feb 2025 - ๐Ÿšง Merged the [fundamental codebase refactor](https://github.com/b4rtaz/distributed-llama/releases/tag/v0.12.0) +- 9 Jan 2025 - [๐ŸŽ Llama 3.3 70B on 4 x Mac Mini M4 Pro 24GB RAM](https://github.com/b4rtaz/distributed-llama/discussions/147) + +### ๐Ÿ”ฅ Setup Root Node by Single Command + +Python 3 and C++ compiler required. The command will download the model and the tokenizer. + +| Model | Size | Command | +| --------------------------------- | -------- | ---------------------------------------------------- | +| Llama 3.1 8B Instruct Q40 | 6.32 GB | `python launch.py llama3_1_8b_instruct_q40` | +| Llama 3.1 405B Instruct Q40 | 238 GB | `python launch.py llama3_1_405b_instruct_q40`. | +| Llama 3.2 1B Instruct Q40 | 1.7 GB | `python launch.py llama3_2_1b_instruct_q40` | +| Llama 3.2 3B Instruct Q40 | 3.4 GB | `python launch.py llama3_2_3b_instruct_q40` | +| Llama 3.3 70B Instruct Q40 | 40 GB | `python launch.py llama3_3_70b_instruct_q40` | +| DeepSeek R1 Distill Llama 8B Q40 | 6.32 GB | `python launch.py deepseek_r1_distill_llama_8b_q40` | +| Qwen 3 0.6B Q40 | 0.9 GB | `python launch.py qwen3_0.6b_q40` | +| Qwen 3 1.7B Q40 | 2.2 GB | `python launch.py qwen3_1.7b_q40` | +| Qwen 3 8B Q40 | 6.7 GB | `python launch.py qwen3_8b_q40` | +| Qwen 3 14B Q40 | 10.9 GB | `python launch.py qwen3_14b_q40` | +| Qwen 3 30B A3B Q40 | 17.0 GB | `python launch.py qwen3_30b_a3b_q40` | + +### ๐Ÿ› ๏ธ Convert Model Manually + +* [๐Ÿค— How to Convert Hugging Face Model](./docs/HOW_TO_CONVERT_HF_MODEL.md) + +### ๐Ÿšง Known Limitations + +* You can run Distributed Llama only on 1, 2, 4... 2^n nodes. +* The maximum number of nodes is equal to the number of KV heads in the model [#70](https://github.com/b4rtaz/distributed-llama/issues/70). +* Only the following quantizations are supported [#183](https://github.com/b4rtaz/distributed-llama/issues/183): + * `q40` model with `q80` `buffer-float-type` + * `f32` model with `f32` `buffer-float-type` + +### ๐Ÿ‘ท Architecture + +```` +[๐Ÿ”€ SWITCH OR ROUTER] + | | | | + | | | |_______ ๐Ÿ”ธ device1 (ROOT) 10.0.0.1 + | | |_________ ๐Ÿ”น device2 (WORKER 1) 10.0.0.2:9999 + | |___________ ๐Ÿ”น device3 (WORKER 2) 10.0.0.3:9999 + |_____________ ๐Ÿ”น device4 (WORKER 3) 10.0.0.4:9999 + ... +```` + +The project is split up into two parts: +* **๐Ÿ”ธ Root node** - it's responsible for loading the model and weights and forward them to workers. Also, it synchronizes the state of the neural network. The root node is also a worker, it processes own slice of the neural network. 
+* **๐Ÿ”น Worker node** - it processes own slice of the neural network. It doesn't require any configuration related to the model. + +You always need the root node and you can add 2^n - 1 worker nodes to speed up the inference. The RAM usage of the neural network is split up across all nodes. The root node requires a bit more RAM than worker nodes. + +### ๐ŸŽน Commands + +* `dllama inference` - run the inference with a simple benchmark, +* `dllama chat` - run the CLI chat, +* `dllama worker` - run the worker node, +* `dllama-api` - run the API server. + +
**๐ŸŽน Supported Arguments**
Inference, Chat, API + +| Argument | Description | Example | +| ---------------------------- | ---------------------------------------------------------------- | -------------------------------------- | +| `--model ` | Path to model. | `dllama_model_meta-llama-3-8b_q40.m` | +| `--tokenizer ` | Tokenizer to model. | `dllama_tokenizer_llama3.t` | +| `--buffer-float-type ` | Float precision of synchronization. | `q80` | +| `--workers ` | Addresses of workers (ip:port), separated by space. | `10.0.0.1:9999 10.0.0.2:9999` | +| `--max-seq-len ` | The maximum sequence length, it helps to reduce the RAM usage. | `4096` | + +Inference, Chat, Worker, API + +| Argument | Description | Example | +| ---------------------------- | --------------------------------------------------------------------- | ----------------------------------- | +| `--nthreads ` | Amount of threads. Don't set a higher value than number of CPU cores. | `4` | + +Worker, API + +| Argument | Description | Example | +| ---------------------------- | --------------------------------- | ----------------- | +| `--port ` | Binding port. | `9999` | + +Inference + +| Argument | Description | Example | +| ---------------------------- | ------------------------------ | ------------------ | +| `--prompt ` | Initial prompt. | `"Hello World"` | +| `--steps ` | Number of tokens to generate. | `256` | + +
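
Taken together, a typical root-node invocation combines these flags as in the sketch below (the model and tokenizer paths assume the model was downloaded with `launch.py` into `./models`; see the How to Run guides for the full walkthrough):

```sh
# Root node: run a short benchmark inference across three workers.
./dllama inference \
  --prompt "Hello world" \
  --steps 32 \
  --model models/llama3_2_3b_instruct_q40/dllama_model_llama3_2_3b_instruct_q40.m \
  --tokenizer models/llama3_2_3b_instruct_q40/dllama_tokenizer_llama3_2_3b_instruct_q40.t \
  --buffer-float-type q80 \
  --nthreads 4 \
  --max-seq-len 4096 \
  --workers 10.0.0.2:9999 10.0.0.3:9999 10.0.0.4:9999
```
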
+ +## ๐Ÿ“Š Measurements + +Please check the [discussions](https://github.com/b4rtaz/distributed-llama/discussions) section, where many measurements were published on different configurations. + +## โœ‹ Contribution + +Feel free to contribute to this project. For small changes, simply create a new merge request. For larger changes, please create an issue to discuss your plans. Please follow these guidelines when contributing: + +* Make only minimal changes and avoid modifying files that are not necessary. +* Ensure the code is compatible across all supported systems and CPUs. +* This repository is maintained in English. + +## ๐Ÿ’ก License + +This project is released under the MIT license. + +## ๐Ÿ“– Citation + +``` +@misc{dllama, + author = {Bartล‚omiej Tadych}, + title = {Distributed Llama}, + year = {2024}, + publisher = {GitHub}, + journal = {GitHub repository}, + howpublished = {\url{https://github.com/b4rtaz/distributed-llama}}, + commit = {7eb77ca93ec0d502e28d36b6fb20039b449cbea4} +} +``` diff --git a/converter/.gitignore b/converter/.gitignore new file mode 100644 index 0000000..c2ea6ab --- /dev/null +++ b/converter/.gitignore @@ -0,0 +1,4 @@ +*.t +*.m +*.bin +*/ diff --git a/converter/convert-hf.py b/converter/convert-hf.py new file mode 100644 index 0000000..9bc6152 --- /dev/null +++ b/converter/convert-hf.py @@ -0,0 +1,265 @@ +import gc +import json +import sys +import os +from writer import parseFloatType, writeTensor, writeHeader, FloatType +from safetensors import safe_open + +class ArchType: + LLAMA = 0xABCD00 + QWEN3 = 0xABCD01 + QWEN3_MOE = 0xABCD02 + +def permute(tensor, nHeads: int, nKvHeads: int): + if nHeads != nKvHeads: + nHeads = nKvHeads + return (tensor.reshape(nHeads, 2, tensor.shape[0] // nHeads // 2, *tensor.shape[1:]).swapaxes(1, 2).reshape(tensor.shape)) + +class Processor: + def __init__(self, config): + self.config = config + self.archType = config['arch_type'] + self.currentModelIndex = None + self.currentModel = None + self.currentModelKeys = None + self.layerMap = {} + self.plan = [] + + def __unloadModel(self): + if self.currentModel: + del self.currentModel + self.currentModel = None + gc.collect() + self.currentModelIndex = None + + def __loadModel(self, index: int): + if (self.currentModelIndex == index): + return + self.__unloadModel() + filePath = self.config['files'][index] + fileName = os.path.basename(filePath) + print(f'๐Ÿ’ฟ Loading file {fileName}...') + self.currentModel = safe_open(filePath, framework='pt', device='cpu') + self.currentModelKeys = list(self.currentModel.keys()) + for key in self.currentModelKeys: + self.layerMap[key] = index + print(f'Found {len(self.currentModelKeys)} layers') + self.currentModelIndex = index + + def __transformQ(self, tensor): + if self.archType == ArchType.LLAMA: + return permute(tensor, self.config['n_heads'], self.config['n_heads']) + return tensor + + def __transformK(self, tensor): + if self.archType == ArchType.LLAMA: + return permute(tensor, self.config['n_heads'], self.config['n_kv_heads']) + return tensor + + def __preparePlan(self): + wt = self.config['weights_float_type'] + p = self.plan + p.append([FloatType.F32, + 'model.embed_tokens.weight']) + for l in range(0, self.config['n_layers']): + p.append([wt, self.__transformQ, + f'model.layers.{l}.self_attn.q_proj.weight']) + p.append([wt, self.__transformK, + f'model.layers.{l}.self_attn.k_proj.weight']) + p.append([wt, + f'model.layers.{l}.self_attn.v_proj.weight']) + p.append([wt, + f'model.layers.{l}.self_attn.o_proj.weight']) + + if 
(self.config['n_experts'] > 0): + p.append([FloatType.F32, f'model.layers.{l}.mlp.gate.weight']) + for e in range(self.config['n_experts']): + p.append([wt, + f'model.layers.{l}.mlp.experts.{e}.gate_proj.weight']) + p.append([wt, + f'model.layers.{l}.mlp.experts.{e}.down_proj.weight']) + p.append([wt, + f'model.layers.{l}.mlp.experts.{e}.up_proj.weight']) + else: + p.append([wt, + f'model.layers.{l}.mlp.gate_proj.weight']) + p.append([wt, + f'model.layers.{l}.mlp.down_proj.weight']) + p.append([wt, + f'model.layers.{l}.mlp.up_proj.weight']) + + if (self.archType == ArchType.QWEN3 or self.archType == ArchType.QWEN3_MOE): + p.append([FloatType.F32, + f'model.layers.{l}.self_attn.q_norm.weight']) + p.append([FloatType.F32, + f'model.layers.{l}.self_attn.k_norm.weight']) + + p.append([FloatType.F32, + f'model.layers.{l}.input_layernorm.weight']) + p.append([FloatType.F32, + f'model.layers.{l}.post_attention_layernorm.weight']) + p.append([FloatType.F32, + 'model.norm.weight']) + p.append([wt, + 'lm_head.weight', 'model.embed_tokens.weight']) + + def write(self, outputFile: str): + self.__preparePlan() + + # Loading the last model file to get the layer names + self.__loadModel(len(self.config['files']) - 1) + self.__unloadModel() + + for planItem in self.plan: + lookup = planItem[1:] + transform = None + if (callable(lookup[0])): + transform = lookup[0] + lookup = lookup[1:] + + if (self.currentModelIndex == None): + modelIndex = 0 + else: + modelIndex = None + for layerName in lookup: + if (layerName in self.layerMap): + modelIndex = self.layerMap[layerName] + break + if (modelIndex is None): + modelIndex = self.currentModelIndex + 1 + self.__loadModel(modelIndex) + + tensor = None + for layerName in lookup: + if (layerName in self.currentModelKeys): + tensor = self.currentModel.get_tensor(layerName) + break + if tensor is None: + raise Exception(f'Layer {lookup[0]} not found') + print(f'๐Ÿ”ถ Writing tensor {layerName} {tensor.shape}...') + + floatType = planItem[0] + if (transform): + tensor = transform(tensor) + writeTensor(outputFile, tensor, floatType) + +def parseArchType(type: str): + archType = { + 'llama': ArchType.LLAMA, + 'mistral': ArchType.LLAMA, + 'qwen3': ArchType.QWEN3, + 'qwen3_moe': ArchType.QWEN3_MOE, + }.get(type) + if (archType is None): + raise Exception(f'Unsupported arch type: {type}') + return archType + +def parseHiddenAct(act: str): + hiddenAct = { + 'gelu': 0, + 'silu': 1 + }.get(act) + if (hiddenAct is None): + raise Exception(f'Unsupported hidden act: {act}') + return hiddenAct + +def parseRopeType(rt: str): + ropeType = { + 'llama3': 2, # LLAMA3_1 + }.get(rt) + if (ropeType is None): + raise Exception(f'Unsupported rope type: {ropeType}') + return ropeType + +def parseRmsNormEpsilon(epsilon: float): + if (epsilon == 1e-05): + return 5 + elif (epsilon == 1e-06): + return 6 + raise Exception(f'Unsupported epsilon: {epsilon}') + +def loadConfig(folderPath: str, weightsFloatType: int): + allFiles = os.listdir(folderPath) + allFiles.sort() + with open(os.path.join(folderPath, 'config.json')) as fc: + config = json.load(fc) + files = [] + for fileName in allFiles: + if fileName.endswith('.safetensors') and not fileName.startswith('.'): + files.append(os.path.join(folderPath, fileName)) + if (len(files) == 0): + raise Exception('Not found any model file') + + result = { + 'version': 0, + 'arch_type': parseArchType(config['model_type']), + 'hidden_act': parseHiddenAct(config['hidden_act']), + 'dim': config['hidden_size'], + 'hidden_dim': config['intermediate_size'], + 
'n_layers': config['num_hidden_layers'], + 'n_heads': config['num_attention_heads'], + 'n_kv_heads': config['num_key_value_heads'], + 'weights_float_type': weightsFloatType, + 'max_seq_len': config['max_position_embeddings'], + 'vocab_size': config['vocab_size'], + 'files': files, + } + + nExperts = config.get('num_experts') + nActiveExperts = config.get('num_experts_per_tok') + result['n_experts'] = int(nExperts) if nExperts is not None else 0 + result['n_active_experts'] = int(nActiveExperts) if nActiveExperts is not None else 0 + + ropeTheta = config.get('rope_theta') + if (ropeTheta is not None): + result['rope_theta'] = int(ropeTheta) + + ropeScaling = config.get('rope_scaling') + if (ropeScaling is not None): + result['rope_scaling_factor'] = int(ropeScaling['factor']) + result['rope_scaling_low_freq_factor'] = int(ropeScaling['low_freq_factor']) + result['rope_scaling_high_freq_factory'] = int(ropeScaling['high_freq_factor']) + result['rope_scaling_orig_max_seq_len'] = int(ropeScaling['original_max_position_embeddings']) + result['rope_type'] = parseRopeType(ropeScaling['rope_type']) + + headDim = config.get('head_dim') + if (headDim is not None): + result['head_dim'] = headDim + + rmsNormEps = config.get('rms_norm_eps') + if (rmsNormEps is not None): + result['norm_epsilon'] = parseRmsNormEpsilon(rmsNormEps) + + moeHiddenDim = config.get('moe_intermediate_size') + if (moeHiddenDim is not None): + result['moe_hidden_dim'] = int(moeHiddenDim) + return result + +def printUsage(): + print('Usage: python convert-hf.py ') + print() + print('Options:') + print(' The path to the folder containing the model files') + print(' The float type of the weights (e.g. "q40")') + print(' The name of the model (e.g. "llama3")') + +if __name__ == '__main__': + if (len(sys.argv) < 4): + printUsage() + exit(1) + + sourceFolderPath = sys.argv[1] + weightsFloatType = parseFloatType(sys.argv[2]) + name = sys.argv[3] + outputFileName = f'dllama_model_{name}_{sys.argv[2]}.m' + + print(f'Output file: {outputFileName}') + + config = loadConfig(sourceFolderPath, weightsFloatType) + + with open(outputFileName, 'wb') as outputFile: + writeHeader(outputFile, config) + processor = Processor(config) + processor.write(outputFile) + + print(f'โœ… {outputFileName} created successfully') \ No newline at end of file diff --git a/converter/convert-llama.py b/converter/convert-llama.py new file mode 100644 index 0000000..31fae60 --- /dev/null +++ b/converter/convert-llama.py @@ -0,0 +1,121 @@ +import os +import sys +import json +import torch +import math +import numpy as np +from writer import writeTensor, writeHeader, parseFloatType, strFloatType, FloatType +from pathlib import Path + +LAYER_CHUNK_SIZE = 48 + +def convert(modelPath, outputPath, targetFloatType): + paramsPath = os.path.join(modelPath, 'params.json') + with open(paramsPath) as f: + params = json.load(f) + if (params['vocab_size'] < 1): + raise Exception('vocab_size is invalid, please update params.json file') + if (params.get('max_seq_len') is None): + raise Exception('max_seq_len is required, please update params.json file') + params['n_kv_heads'] = params.get('n_kv_heads') or params['n_heads'] + params['head_size'] = params['dim'] / params['n_heads'] + params['arch_type'] = 0xABCD00 + params['n_experts'] = 0 + params['n_active_experts'] = 0 + params['weights_float_type'] = targetFloatType + if ('rope_theta' in params): + params['rope_theta'] = int(params['rope_theta']) + + modelPaths = sorted(list(Path(modelPath).glob('consolidated.*.pth'))) + nSlices = 
len(modelPaths) + + layers = [] + layers.append('tok_embeddings.weight') + for layerIndex in range(0, params['n_layers']): + layers.append(f'layers.{layerIndex}.attention.wq.weight') + layers.append(f'layers.{layerIndex}.attention.wk.weight') + layers.append(f'layers.{layerIndex}.attention.wv.weight') + layers.append(f'layers.{layerIndex}.attention.wo.weight') + layers.append(f'layers.{layerIndex}.feed_forward.w1.weight') + layers.append(f'layers.{layerIndex}.feed_forward.w2.weight') + layers.append(f'layers.{layerIndex}.feed_forward.w3.weight') + layers.append(f'layers.{layerIndex}.attention_norm.weight') + layers.append(f'layers.{layerIndex}.ffn_norm.weight') + layers.append('norm.weight') + layers.append('output.weight') + + isHeaderWrote = False + outFile = open(outputPath, 'wb') + + nChunks = math.ceil(len(layers) / LAYER_CHUNK_SIZE) + for chunkIndex in range(0, nChunks): + chunkLayerNames = layers[LAYER_CHUNK_SIZE * chunkIndex:LAYER_CHUNK_SIZE * (chunkIndex + 1)] + models = {} + for layerName in chunkLayerNames: + models[layerName] = [] + + print(f'๐Ÿ’ฟ Chunking model {chunkIndex + 1}/{nChunks}...') + + for modelPath in modelPaths: + model = torch.load(modelPath, map_location='cpu') + for modelKey in model: + if (modelKey in chunkLayerNames): + models[modelKey].append(model[modelKey]) + if not isHeaderWrote: + params['hidden_dim'] = model['layers.0.feed_forward.w1.weight'].shape[0] * nSlices + writeHeader(outFile, params) + isHeaderWrote = True + del model + + for layerName in chunkLayerNames: + if layerName == 'rope.freqs': + continue + + isAxis1 = ( + layerName == 'tok_embeddings.weight' or + layerName.endswith('.attention.wo.weight') or + layerName.endswith('.feed_forward.w2.weight') + ) + isAlwaysF32 = ( + layerName == 'tok_embeddings.weight' or + layerName.endswith('.attention_norm.weight') or + layerName.endswith('.ffn_norm.weight') or + layerName == 'norm.weight' + ) + floatType = FloatType.F32 if isAlwaysF32 else targetFloatType + + tensors = models[layerName] + if len(tensors) == 1 or len(tensors[0].shape) == 1: + tensor = tensors[0] + else: + tensor = torch.cat(tensors, dim=(1 if isAxis1 else 0)) + + print(f'๐Ÿ”ถ Exporting {layerName} {tensor.shape}...') + writeTensor(outFile, tensor, floatType) + + del models + + outFile.close() + +def usage(): + print('Usage: python convert-llama.py ') + exit(1) + +if __name__ == '__main__': + if (len(sys.argv) < 3): + usage() + + modelPath = sys.argv[1] + targetFloatType = parseFloatType(sys.argv[2]) + targetFloatTypeStr = strFloatType(targetFloatType) + + modelName = os.path.basename(modelPath) + outputFileName = f'dllama_model_{modelName.lower()}_{targetFloatTypeStr}.m' + + print(f'Model name: {modelName}') + print(f'Target float type: {targetFloatTypeStr}') + print(f'Target file: {outputFileName}') + + convert(modelPath, outputFileName, targetFloatType) + + print('Done!') diff --git a/converter/convert-tokenizer-hf.py b/converter/convert-tokenizer-hf.py new file mode 100644 index 0000000..e9c8e0e --- /dev/null +++ b/converter/convert-tokenizer-hf.py @@ -0,0 +1,137 @@ +import sys +import json +import os +from sentencepiece import SentencePieceProcessor +from transformers import PreTrainedTokenizerFast +writer = __import__('tokenizer-writer') + +def openJson(path): + with open(path, 'r', encoding='utf-8') as file: + return json.load(file) + +def unicodeToBytes(): + # https://github.com/openai/gpt-2/blob/9b63575ef42771a015060c964af2c3da4cf7c8ab/src/encoder.py#L9 + bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("ยก"), ord("ยฌ") 
+ 1)) + list(range(ord("ยฎ"), ord("รฟ") + 1)) + cs = bs[:] + n = 0 + for b in range(2 ** 8): + if b not in bs: + bs.append(b) + cs.append(2 ** 8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(cs, bs)) + +class TokensResolver: + def __init__(self, dirPath, tokenizerConfig): + self.dirPath = dirPath + self.tokenizerConfig = tokenizerConfig + self.bosId = None + self.eosIds = None + self.tokens = [] + self.scores = [] + + def resolvePreTrainedTokenizerFast(self): + utb = unicodeToBytes() + tokenizer = PreTrainedTokenizerFast(tokenizer_file = os.path.join(self.dirPath, 'tokenizer.json')) + vocabLen = len(tokenizer.get_vocab()) + for i in range(vocabLen): + tokenChars = list(tokenizer.convert_ids_to_tokens([i])[0]) + tokenBytes = [] + for chr in tokenChars: + if (chr in utb): + tokenBytes.append(utb[chr]) + else: + tokenBytes += list(chr.encode('utf-8')) + self.tokens.append(bytes(tokenBytes)) + self.scores.append(-float(i)) + + self.bosId = tokenizer.bos_token_id + if (tokenizer.eos_token_id): + self.eosIds = [tokenizer.eos_token_id] + if (self.bosId is None or self.eosId is None): + config = openJson(os.path.join(self.dirPath, 'config.json')) + if (self.bosId is None): + self.bosId = config['bos_token_id'] + if (self.eosIds is None): + self.eosIds = config['eos_token_id'] + if isinstance(self.eosIds, list): + self.eosIds = self.eosIds + else: + self.eosIds = [self.eosIds] + + def resolveLlamaTokenizer(self): + modelPath = os.path.join(self.dirPath, 'tokenizer.model') + processor = SentencePieceProcessor(model_file=modelPath) + + assert processor.vocab_size() == processor.get_piece_size() + self.bosId = processor.bos_id() + self.eosIds = [processor.eos_id()] + vocabSize = processor.vocab_size() + for i in range(vocabSize): + t = processor.id_to_piece(i) + s = processor.get_score(i) + t = t.replace('โ–', ' ') # sentencepiece uses this character as whitespace + # Check for byte characters + if len(t) == 6 and t.startswith('<0x') and t.endswith('>'): + # For example, "<0x0A>"" is a newline character + b = bytearray.fromhex(t[3:-1]) + else: + b = t.encode('utf-8') + self.tokens.append(b) + self.scores.append(s) + + def resolve(self): + cls = self.tokenizerConfig['tokenizer_class'] + if (cls == 'PreTrainedTokenizerFast' or + cls == 'LlamaTokenizerFast' or + cls == 'Qwen2Tokenizer'): + return self.resolvePreTrainedTokenizerFast() + if (cls == 'LlamaTokenizer'): + return self.resolveLlamaTokenizer() + raise Exception(f'Tokenizer {cls} is not supported') + +def printUsage(): + print('Usage: python convert-tokenizer-hf.py ') + print() + print('Options:') + print(' The path to the folder with tokenizer_config.json') + print(' The name of the tokenizer (e.g. 
"llama3")') + +if __name__ == '__main__': + if (len(sys.argv) < 2): + printUsage() + exit(1) + + dirPath = sys.argv[1] + name = sys.argv[2] + tokenizerConfig = openJson(os.path.join(dirPath, 'tokenizer_config.json')) + + resolver = TokensResolver(dirPath, tokenizerConfig) + resolver.resolve() + + if (resolver.bosId is None or resolver.eosIds is None): + raise Exception('Cannot resolve bosId or eosIds') + print(f'bosId: {resolver.bosId} ({resolver.tokens[resolver.bosId]})') + for eosId in resolver.eosIds: + print(f'eosId: {eosId} ({resolver.tokens[eosId]})') + + chatTemplate = None + if ('chat_template' in tokenizerConfig): + chatTemplate = tokenizerConfig['chat_template'].encode('utf-8') + + addBos = True + if ('add_bos_token' in tokenizerConfig): + addBos = tokenizerConfig['add_bos_token'] + + outputFileName = f'dllama_tokenizer_{name}.t' + with open(outputFileName, 'wb') as outputFile: + writer.writeTokenizer( + outputFile, + resolver.tokens, + resolver.scores, + chatTemplate, + resolver.bosId, + addBos, + resolver.eosIds) + print(f'โœ… Created {outputFileName}') diff --git a/converter/convert-tokenizer-llama2.py b/converter/convert-tokenizer-llama2.py new file mode 100644 index 0000000..fa97b20 --- /dev/null +++ b/converter/convert-tokenizer-llama2.py @@ -0,0 +1,44 @@ +import sys +import os +from sentencepiece import SentencePieceProcessor +writer = __import__('tokenizer-writer') + +chatTemplate = "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}" + +def printUsage(): + print('Usage: python convert-tokenizer-llama2.py ') + print() + print('Options:') + print(' The path to the folder with llama2 folder path') + +if __name__ == '__main__': + if (len(sys.argv) < 2): + printUsage() + exit(1) + + dirPath = sys.argv[1] + modelPath = os.path.join(dirPath, 'tokenizer.model') + processor = SentencePieceProcessor(model_file=modelPath) + + vocabSize = processor.vocab_size() + tokens = [] + scores = [] + for i in range(vocabSize): + t = processor.id_to_piece(i) + s = processor.get_score(i) + t = t.replace('โ–', ' ') # sentencepiece uses this character as whitespace + b = t.encode('utf-8') + tokens.append(b) + scores.append(s) + + outputFileName = 'dllama_tokenizer_llama2.t' + with open(outputFileName, 'wb') as outputFile: + writer.writeTokenizer( + outputFile, + tokens, + scores, + chatTemplate.encode('utf-8'), + processor.bos_id(), + [processor.eos_id()]) + + print(f'โœ… Created {outputFileName}') diff --git a/converter/convert-tokenizer-llama3.py b/converter/convert-tokenizer-llama3.py new file mode 100644 index 0000000..f2e7aec --- /dev/null +++ b/converter/convert-tokenizer-llama3.py @@ -0,0 +1,78 @@ +import sys +import base64 +writer = __import__('tokenizer-writer') + +# Format of input file: +# ``` +# IQ== 0 +# Ig== 1 +# Iw== 2 +# ... 
+# ``` + +nSpecialTokens = 256 +specialTokens = [ + '<|begin_of_text|>', + '<|end_of_text|>', + '<|reserved_special_token_0|>', + '<|reserved_special_token_1|>', + '<|reserved_special_token_2|>', + '<|reserved_special_token_3|>', + '<|start_header_id|>', + '<|end_header_id|>', + '<|reserved_special_token_4|>', + '<|eot_id|>', +] + [ + f'<|reserved_special_token_{i}|>' + for i in range(5, nSpecialTokens - 5) +] +bosId = 128000 +eosId = 128001 +chatEosId = 128009 +chatTemplate = "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}" + +def printUsage(): + print('Usage: python convert-tokenizer-llama3.py ') + print() + print('Options:') + print(' The path to the Llama 3 tokenizer model (tokenizer.model)') + +if __name__ == '__main__': + if (len(sys.argv) < 2): + printUsage() + exit(1) + + modelPath = sys.argv[1] + outputFileName = 'dllama_tokenizer_llama3.t' + + with open(modelPath, 'r') as inputFile: + with open(outputFileName, 'wb') as outputFile: + inputLines = inputFile.readlines() + nLines = len(inputLines) + + tokens = [] + scores = [] + for line in inputLines: + s = line.split(' ') + bytes = base64.b64decode(s[0]) + score = -float(s[1]) + tokens.append(bytes) + scores.append(score) + + specialTokenIndex = nLines + for token in specialTokens: + bytes = token.encode('utf-8') + score = -float(specialTokenIndex) + tokens.append(bytes) + scores.append(score) + specialTokenIndex += 1 + + writer.writeTokenizer( + outputFile, + tokens, + scores, + chatTemplate.encode('utf-8'), + bosId, + [eosId, chatEosId]) + + print(f'โœ… Created {outputFileName}') diff --git a/converter/requirements.txt b/converter/requirements.txt new file mode 100644 index 0000000..221c48d --- /dev/null +++ b/converter/requirements.txt @@ -0,0 +1,5 @@ +python>=3.9 +numpy==1.23.5 +pytorch==2.0.1 +safetensors==0.4.2 +sentencepiece==0.1.99 \ No newline at end of file diff --git a/converter/tokenizer-writer.py b/converter/tokenizer-writer.py new file mode 100644 index 0000000..e01b7d0 --- /dev/null +++ b/converter/tokenizer-writer.py @@ -0,0 +1,57 @@ +import struct + +def writeTokenizer(file, tokens, scores, chatTemplate, bosId, addBos, eosTokens): + headerKeys = { + 'version': 0, + 'vocab_size': 1, + 'max_token_length': 2, + 'bos_id': 3, + 'chat_template': 7, + 'n_eos_tokens': 9, + 'add_bos': 10, + } + header = struct.pack('i', 0x567124) + + nTokens = len(tokens) + maxTokenLength = max(len(t) for t in tokens) + + params = {} + params['bos_id'] = bosId + params['version'] = 1 + params['vocab_size'] = nTokens + params['max_token_length'] = maxTokenLength + if (chatTemplate): + params['chat_template'] = len(chatTemplate) + params['n_eos_tokens'] = len(eosTokens) + params['add_bos'] = 1 if addBos else 0 + + data = b'' + for key in params: + value = params[key] + if value is None: + continue + if key in headerKeys: + data += struct.pack('ii', headerKeys[key], params[key]) + else: + print(f'Unknown header key: {key}') + + print('โญ Params:') + print(params) + if (chatTemplate): + print('โญ Chat template:') + print(chatTemplate) + + header += struct.pack('i', len(header) * 2 + len(data)) + file.write(header) + file.write(data) + if chatTemplate: + file.write(chatTemplate) + + for eosToken in 
eosTokens: + file.write(struct.pack('i', eosToken)) + + for i in range(0, nTokens): + size = len(tokens[i]) + assert(size > 0) + file.write(struct.pack('fI', scores[i], size)) + file.write(tokens[i]) diff --git a/converter/writer-test.py b/converter/writer-test.py new file mode 100644 index 0000000..3a982b6 --- /dev/null +++ b/converter/writer-test.py @@ -0,0 +1,35 @@ +import sys +import time +import torch +from writer import writeQuantizedQ40Tensor + +TEMP_FILE_NAME = 'writer-test.temp' + +def readBase64FromFile(path): + with open(path, 'rb') as file: + return file.read().hex() + +def testWriteQuantizedQ40Tensor(): + EXPECTED_OUTPUT = '7e346345a692b89665b2c5790537876e598aaa366d988876a898b8d788a98868ce660c66f6b3a88cba5ce9a871987ba9cc5bcaaa760c1eb556a4455b747b6b9504968828ef2a8d7c1db5c6be3764799e66db6d8e76463126a30e4333cad7a4f645947c6cf97f9de086d468c8d535a6ba7dc799d3d0c657bab6799468cad8bb349eb7d7635c7c798998696bb38e4085a9eb34444ba96a7f8ba7b2b42d746a96cf9660aeb4499d8708ad5c7b9a7558947645f3bbb6b0346a656887ad9a86059baac5c596ab781c703569bb8a4356a4bd58cb78736ba09759bb0e34a6274e827b957d7a67dfa86846955660d234b6d9d78a378094a8a8708a7a774ae92f8a36b8c999a9b77a7d958a69747c807963941235379886d69a7a8767b3a6a4ac71999760' + + torch.manual_seed(seed=1) + tensor = torch.randn(32, 16) + + with open(TEMP_FILE_NAME, 'wb') as file: + writeQuantizedQ40Tensor(file, tensor) + + contentBase64 = readBase64FromFile(TEMP_FILE_NAME) + assert contentBase64 == EXPECTED_OUTPUT, f'Received: {contentBase64}' + print('โœ… writeQuantizedQ40Tensor') + +def runWriteQuantizedQ40TensorBenchmark(): + tensor = torch.randn(8192, 4096) + t0 = time.time() + with open(TEMP_FILE_NAME, 'wb') as file: + writeQuantizedQ40Tensor(file, tensor) + t1 = time.time() + print(f'๐Ÿ• writeQuantizedQ40Tensor: {t1 - t0:.4f}s') + +if __name__ == '__main__': + testWriteQuantizedQ40Tensor() + runWriteQuantizedQ40TensorBenchmark() diff --git a/converter/writer.py b/converter/writer.py new file mode 100644 index 0000000..13c8d3b --- /dev/null +++ b/converter/writer.py @@ -0,0 +1,148 @@ +import struct +import torch +import time +import numpy as np + +class FloatType: + F32 = 0 + F16 = 1 + Q40 = 2 + Q80 = 3 + +floatTypeMap = { + 'f32': FloatType.F32, + 'f16': FloatType.F16, + 'q40': FloatType.Q40, + 'q80': FloatType.Q80, +} +floatTypeNames = list(floatTypeMap.keys()) + +def parseFloatType(type): + floatType = floatTypeMap.get(type) + if floatType is not None: + return floatType + raise Exception(f'{type} is not supported') + +def strFloatType(type): + return floatTypeNames[type] + +def writeQuantizedQ40Tensor(file, x): + x = x.to(torch.float32).numpy().astype(np.float32) + blockSize = 32 + blockHalfSize = blockSize // 2 + assert(x.shape[0] % blockSize == 0) + groups = x.reshape(-1, blockSize) + gmax = np.max(groups, axis=1) + gmin = np.min(groups, axis=1) + deltas = np.divide(np.where(-gmin > gmax, gmin, gmax), -8) + deltas16 = deltas.astype(np.float16) + ids = np.where(deltas != 0, 1.0 / deltas, 0) + groups = np.add(groups * ids[:, np.newaxis], 8.5) + groups = np.clip(groups, 0, 15).astype(int) + + gLow = groups[:, :blockHalfSize] & 0xF + gHigh = (groups[:, blockHalfSize:] & 0xF) << 4 + gCombined = gLow | gHigh + + nBytes = 0 + for groupIndex in range(0, len(groups)): + delta16 = deltas16[groupIndex] + buffer = struct.pack(f'e{blockHalfSize}B', delta16, *gCombined[groupIndex]) + file.write(buffer) + nBytes += len(buffer) + return nBytes + +def writeQuantizedQ80Tensor(file, x): + x = x.to(torch.float32).numpy().astype(np.float32) + blockSize = 32 + 
assert(x.shape[0] % blockSize == 0) + groups = x.reshape(-1, blockSize) + gmax = np.max(groups, axis=1) + gmin = np.min(groups, axis=1) + gabsMax = np.where(-gmin > gmax, -gmin, gmax) + deltas = gabsMax / ((1 << 7) - 1) + deltas16 = deltas.astype(np.float16) + ids = np.where(deltas != 0, 1.0 / deltas, 0) + groups = groups * ids[:, np.newaxis] + groups8 = np.round(groups).astype(np.int8) + + nBytes = 0 + for groupIndex in range(0, len(groups)): + buffer = struct.pack(f'e{blockSize}b', deltas16[groupIndex], *groups8[groupIndex]) + file.write(buffer) + nBytes += len(buffer) + return nBytes + +def writeF32Tensor(file, d): + chunkSize = 10000 + nBytes = 0 + for i in range(0, len(d), chunkSize): + chunk = d[i:i+chunkSize].to(torch.float32).numpy().astype(np.float32) + b = struct.pack(f'{len(chunk)}f', *chunk) + nBytes += len(b) + file.write(b) + return nBytes + +def writeF16Tensor(file, d): + d = d.to(torch.float16).numpy().astype(np.float16) + b = struct.pack(f'{len(d)}e', *d) + file.write(b) + return len(b) + +def writeTensor(file, tensor, floatType): + d = tensor.detach().cpu().view(-1) + t0 = time.time() + nBytes = 0 + if (floatType == FloatType.F16): + nBytes = writeF16Tensor(file, d) + elif (floatType == FloatType.F32): + nBytes = writeF32Tensor(file, d) + elif (floatType == FloatType.Q40): + nBytes = writeQuantizedQ40Tensor(file, d) + elif (floatType == FloatType.Q80): + nBytes = writeQuantizedQ80Tensor(file, d) + else: + raise Exception(f'Unknown float type') + t1 = time.time() + print(f'Saved {strFloatType(floatType)} tensor in {t1 - t0:.2f}s, {nBytes} bytes') + +def writeHeader(file, params): + headerKeys = { + 'version': 0, + 'arch_type': 1, + 'dim': 2, + 'hidden_dim': 3, + 'n_layers': 4, + 'n_heads': 5, + 'n_kv_heads': 6, + 'n_experts': 7, + 'n_active_experts': 8, + 'vocab_size': 9, + 'max_seq_len': 10, + 'hidden_act': 11, + 'rope_theta': 12, + 'weights_float_type': 13, + 'rope_scaling_factor': 14, + 'rope_scaling_low_freq_factor': 15, + 'rope_scaling_high_freq_factory': 16, + 'rope_scaling_orig_max_seq_len': 17, + 'rope_type': 18, + 'head_dim': 19, + 'norm_epsilon': 20, + 'moe_hidden_dim': 21, + } + header = struct.pack('i', 0xA00ABCD) + + data = b'' + for key in params: + if key in headerKeys: + data += struct.pack('ii', headerKeys[key], params[key]) + else: + print(f'Warning: Unknown header key: {key}') + + header += struct.pack('i', len(header) * 2 + len(data)) + file.write(header) + file.write(data) + for key in params: + print(f'๐ŸŽ“ {key}: {params[key]}') + print() diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..77210f9 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,81 @@ +version: '3.8' + +services: + # Controller service - downloads models and runs API + controller: + build: + context: . + dockerfile: Dockerfile.controller + ports: + - "9999:9999" + volumes: + - ./models:/app/models + networks: + distributed-llama: + ipv4_address: 172.20.0.10 + environment: + - MODEL_NAME=${MODEL_NAME:-llama3_2_3b_instruct_q40} + - NTHREADS=${CONTROLLER_NTHREADS:-4} + - MAX_SEQ_LEN=${MAX_SEQ_LEN:-4096} + - BUFFER_FLOAT_TYPE=${BUFFER_FLOAT_TYPE:-q80} + command: > + --model ${MODEL_NAME:-llama3_2_3b_instruct_q40} + --port 9999 + --nthreads ${CONTROLLER_NTHREADS:-4} + --max-seq-len ${MAX_SEQ_LEN:-4096} + --buffer-float-type ${BUFFER_FLOAT_TYPE:-q80} + --workers 172.20.0.11:9999 172.20.0.12:9999 172.20.0.13:9999 + depends_on: + - worker1 + - worker2 + - worker3 + + # Worker services + worker1: + build: + context: . 
+ dockerfile: Dockerfile.worker + networks: + distributed-llama: + ipv4_address: 172.20.0.11 + environment: + - NTHREADS=${WORKER_NTHREADS:-4} + command: > + --port 9999 + --nthreads ${WORKER_NTHREADS:-4} + + worker2: + build: + context: . + dockerfile: Dockerfile.worker + networks: + distributed-llama: + ipv4_address: 172.20.0.12 + environment: + - NTHREADS=${WORKER_NTHREADS:-4} + command: > + --port 9999 + --nthreads ${WORKER_NTHREADS:-4} + + worker3: + build: + context: . + dockerfile: Dockerfile.worker + networks: + distributed-llama: + ipv4_address: 172.20.0.13 + environment: + - NTHREADS=${WORKER_NTHREADS:-4} + command: > + --port 9999 + --nthreads ${WORKER_NTHREADS:-4} + +networks: + distributed-llama: + driver: bridge + ipam: + config: + - subnet: 172.20.0.0/16 + +volumes: + models: \ No newline at end of file diff --git a/docs/HOW_TO_CONVERT_HF_MODEL.md b/docs/HOW_TO_CONVERT_HF_MODEL.md new file mode 100644 index 0000000..f2d4bb4 --- /dev/null +++ b/docs/HOW_TO_CONVERT_HF_MODEL.md @@ -0,0 +1,32 @@ +# How to Convert ๐Ÿค— Hugging Face Model + +Currently, Distributed Llama supports these Hugging Face models: `llama`, `mistral`, `qwen3` and `qwen3_moe`. You can try to convert any compatible Hugging Face model and run it with Distributed Llama. + +> [!IMPORTANT] +> All converters are in the early stages of development. After conversion, the model may not work correctly. + +1. Download a model, for example: [Mistral-7B-v0.3](https://huggingface.co/mistralai/Mistral-7B-v0.3/tree/main). +2. The downloaded model should contain `config.json`, `tokenizer.json`, `tokenizer_config.json` and `tokenizer.model` and safetensor files. +3. Run the converter of the model: + +```sh +cd converter +python convert-hf.py path/to/hf/model q40 mistral-7b-0.3 +``` + +4. Run the converter of the tokenizer: + +```sh +python convert-tokenizer-hf.py path/to/hf/model mistral-7b-0.3 +``` + +5. That's it! Now you can run the Distributed Llama. + +```sh +./dllama inference \ + --prompt "Hello world" \ + --steps 64 \ + --model dllama_model_mistral-7b-0.3_q40.m \ + --tokenizer dllama_tokenizer_mistral-7b-0.3.t \ + --buffer-float-type q80 +``` diff --git a/docs/HOW_TO_RUN_GPU.md b/docs/HOW_TO_RUN_GPU.md new file mode 100644 index 0000000..8d84cb0 --- /dev/null +++ b/docs/HOW_TO_RUN_GPU.md @@ -0,0 +1,34 @@ +# How to Run Distributed Llama on ๐Ÿง  GPU + +Distributed Llama can run on GPU devices using Vulkan API. This article describes how to build and run the project on GPU. + +Before you start here, please check how to build and run Distributed Llama on CPU: +* [๐Ÿ“ How to Run on Raspberry Pi](./HOW_TO_RUN_RASPBERRYPI.md) +* [๐Ÿ’ป How to Run on Linux, MacOS or Windows](./HOW_TO_RUN_LINUX_MACOS_WIN.md) + +To run on GPU, please follow these steps: + +1. Install Vulkan SDK for your platform. + * Linux: please check [this article](https://vulkan.lunarg.com/doc/view/latest/linux/getting_started_ubuntu.html). + * MacOS: download SDK [here](https://vulkan.lunarg.com/sdk/home#mac). +2. Build Distributed Llama with GPU support: + +```bash +DLLAMA_VULKAN=1 make dllama +DLLAMA_VULKAN=1 make dllama-api +``` + +3. Now `dllama` and `dllama-api` binaries supports arguments related to GPU usage. + +``` +--gpu-index Use GPU device with given index (use `0` for first device) +``` + +4. You can run the root node or worker node on GPU by specifying the `--gpu-index` argument. Vulkan backend requires single thread, so you should also set `--nthreads 1`. + +```bash +./dllama inference ... --nthreads 1 --gpu-index 0 +./dllama chat ... 
--nthreads 1 --gpu-index 0
+./dllama worker ... --nthreads 1 --gpu-index 0
+./dllama-api ... --nthreads 1 --gpu-index 0
+```
diff --git a/docs/HOW_TO_RUN_LINUX_MACOS_WIN.md b/docs/HOW_TO_RUN_LINUX_MACOS_WIN.md
new file mode 100644
index 0000000..f66d098
--- /dev/null
+++ b/docs/HOW_TO_RUN_LINUX_MACOS_WIN.md
@@ -0,0 +1,89 @@
+# How to Run Distributed Llama on 💻 Linux, MacOS or Windows
+
+This article describes how to run Distributed Llama on 4 devices, but you can also run it on 1, 2, 4, 8 or more devices. Please adjust the commands and topology according to your configuration.
+
+````
+[🔀 SWITCH OR ROUTER]
+    | | | |
+    | | | |_______ 🔸 device1 (ROOT)     10.0.0.1
+    | | |_________ 🔹 device2 (WORKER 1) 10.0.0.2:9999
+    | |___________ 🔹 device3 (WORKER 2) 10.0.0.3:9999
+    |_____________ 🔹 device4 (WORKER 3) 10.0.0.4:9999
+````
+
+1. Install Git and a C++ compiler on **🔸🔹 ALL** devices:
+
+   * Linux:
+     ```
+     sudo apt install git build-essential
+     ```
+   * MacOS
+     ```
+     brew install git
+     ```
+   * Windows
+
+     Install Git and Mingw (via [Chocolatey](https://chocolatey.org/install)):
+     ```powershell
+     choco install git mingw
+     ```
+
+2. Connect **🔸🔹 ALL** devices to your **🔀 SWITCH OR ROUTER** via Ethernet cable. If you're using only two devices, it's better to connect them directly without a switch.
+
+3. Clone this repository and compile Distributed Llama on **🔸🔹 ALL** devices:
+
+```sh
+git clone https://github.com/b4rtaz/distributed-llama.git
+cd distributed-llama
+make dllama
+make dllama-api
+```
+
+4. Download the model to the **🔸 ROOT** device using the `launch.py` script. You don't need to download the model on worker devices.
+
+```sh
+python3 launch.py # Prints a list of available models
+
+python3 launch.py llama3_2_3b_instruct_q40 # Downloads the model to the root device
+```
+
+5. Start workers on all **🔹 WORKER** devices:
+
+```sh
+./dllama worker --port 9999 --nthreads 4
+```
+
+6. Run the inference on the **🔸 ROOT** device to test that everything works:
+
+```sh
+./dllama inference \
+  --prompt "Hello world" \
+  --steps 32 \
+  --model models/llama3_2_3b_instruct_q40/dllama_model_llama3_2_3b_instruct_q40.m \
+  --tokenizer models/llama3_2_3b_instruct_q40/dllama_tokenizer_llama3_2_3b_instruct_q40.t \
+  --buffer-float-type q80 \
+  --nthreads 4 \
+  --max-seq-len 4096 \
+  --workers 10.0.0.2:9999 10.0.0.3:9999 10.0.0.4:9999
+```
+
+7. To run the API server, start it on the **🔸 ROOT** device:
+
+```sh
+./dllama-api \
+  --port 9999 \
+  --model models/llama3_2_3b_instruct_q40/dllama_model_llama3_2_3b_instruct_q40.m \
+  --tokenizer models/llama3_2_3b_instruct_q40/dllama_tokenizer_llama3_2_3b_instruct_q40.t \
+  --buffer-float-type q80 \
+  --nthreads 4 \
+  --max-seq-len 4096 \
+  --workers 10.0.0.2:9999 10.0.0.3:9999 10.0.0.4:9999
+```
+
+Now you can connect to the API server:
+
+```
+http://10.0.0.1:9999/v1/models
+```
+
+8. When the API server is running, you can use the web chat in your browser: open [llama-ui.js.org](https://llama-ui.js.org/), go to the settings, and set the base URL to `http://10.0.0.1:9999`. Press the "save" button and start chatting!
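+
+You can also test the API programmatically. Below is a minimal sketch in Python; it mirrors the request shape used by `examples/chat-api-client.js`, and the root address `10.0.0.1:9999` is an assumption taken from the topology above.
+
+```python
+import json
+from urllib.request import Request, urlopen
+
+# Assumed root-node address from the topology above.
+URL = 'http://10.0.0.1:9999/v1/chat/completions'
+
+payload = {
+    'messages': [
+        {'role': 'system', 'content': 'You are an excellent math teacher.'},
+        {'role': 'user', 'content': 'What is 1 + 2?'}
+    ],
+    'temperature': 0.7,
+    'max_tokens': 128,
+    'stream': False
+}
+
+# POST the chat request and print the token usage and the assistant's reply.
+req = Request(URL, data=json.dumps(payload).encode('utf-8'),
+              headers={'Content-Type': 'application/json'})
+with urlopen(req) as response:
+    body = json.loads(response.read())
+
+print(body['usage'])
+print(body['choices'][0]['message']['content'])
+```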
diff --git a/docs/HOW_TO_RUN_RASPBERRYPI.md b/docs/HOW_TO_RUN_RASPBERRYPI.md
new file mode 100644
index 0000000..502460b
--- /dev/null
+++ b/docs/HOW_TO_RUN_RASPBERRYPI.md
@@ -0,0 +1,96 @@
+# How to Run Distributed Llama on 📍 Raspberry Pi
+
+This article describes how to run Distributed Llama on 4 Raspberry Pi devices, but you can also run it on 1, 2, 4, 8 or more devices. Please adjust the commands and topology according to your configuration.
+
+````
+[🔀 SWITCH OR ROUTER]
+    | | | |
+    | | | |_______ 🔸 raspberrypi1 (ROOT)     10.0.0.1
+    | | |_________ 🔹 raspberrypi2 (WORKER 1) 10.0.0.2:9999
+    | |___________ 🔹 raspberrypi3 (WORKER 2) 10.0.0.3:9999
+    |_____________ 🔹 raspberrypi4 (WORKER 3) 10.0.0.4:9999
+````
+
+1. Install `Raspberry Pi OS Lite (64 bit)` on **🔸🔹 ALL** your Raspberry Pi devices. This OS doesn't have a desktop environment, but you can easily connect via SSH to manage it.
+2. Connect **🔸🔹 ALL** devices to your **🔀 SWITCH OR ROUTER** via Ethernet cable. If you're using only two devices, it's better to connect them directly without a switch.
+3. Connect to all devices via SSH from your computer.
+
+```
+ssh user@raspberrypi1.local
+ssh user@raspberrypi2.local
+ssh user@raspberrypi3.local
+ssh user@raspberrypi4.local
+```
+
+4. Install Git on **🔸🔹 ALL** devices:
+
+```sh
+sudo apt install git
+```
+
+5. Clone this repository and compile Distributed Llama on **🔸🔹 ALL** devices:
+
+```sh
+git clone https://github.com/b4rtaz/distributed-llama.git
+cd distributed-llama
+make dllama
+make dllama-api
+```
+
+6. Download the model to the **🔸 ROOT** device using the `launch.py` script. You don't need to download the model on worker devices.
+
+```sh
+python3 launch.py # Prints a list of available models
+
+python3 launch.py llama3_2_3b_instruct_q40 # Downloads the model to the root device
+```
+
+7. Assign static IP addresses on **🔸🔹 ALL** devices. Each device must have a unique IP address in the same subnet.
+
+```sh
+sudo ip addr add 10.0.0.1/24 dev eth0 # 🔸 ROOT
+sudo ip addr add 10.0.0.2/24 dev eth0 # 🔹 WORKER 1
+sudo ip addr add 10.0.0.3/24 dev eth0 # 🔹 WORKER 2
+sudo ip addr add 10.0.0.4/24 dev eth0 # 🔹 WORKER 3
+```
+
+8. Start workers on all **🔹 WORKER** devices:
+
+```sh
+sudo nice -n -20 ./dllama worker --port 9999 --nthreads 4
+```
+
+9. Run the inference on the **🔸 ROOT** device to test that everything works:
+
+```sh
+sudo nice -n -20 ./dllama inference \
+  --prompt "Hello world" \
+  --steps 32 \
+  --model models/llama3_2_3b_instruct_q40/dllama_model_llama3_2_3b_instruct_q40.m \
+  --tokenizer models/llama3_2_3b_instruct_q40/dllama_tokenizer_llama3_2_3b_instruct_q40.t \
+  --buffer-float-type q80 \
+  --nthreads 4 \
+  --max-seq-len 4096 \
+  --workers 10.0.0.2:9999 10.0.0.3:9999 10.0.0.4:9999
+```
+
+10. To run the API server, start it on the **🔸 ROOT** device:
+
+```sh
+sudo nice -n -20 ./dllama-api \
+  --port 9999 \
+  --model models/llama3_2_3b_instruct_q40/dllama_model_llama3_2_3b_instruct_q40.m \
+  --tokenizer models/llama3_2_3b_instruct_q40/dllama_tokenizer_llama3_2_3b_instruct_q40.t \
+  --buffer-float-type q80 \
+  --nthreads 4 \
+  --max-seq-len 4096 \
+  --workers 10.0.0.2:9999 10.0.0.3:9999 10.0.0.4:9999
+```
+
+Now you can connect to the API server from your computer:
+
+```
+http://raspberrypi1.local:9999/v1/models
+```
+
+11. When the API server is running, you can use the web chat in your browser: open [llama-ui.js.org](https://llama-ui.js.org/), go to the settings, and set the base URL to `http://raspberrypi1.local:9999`. Press the "save" button and start chatting!
diff --git a/examples/chat-api-client.js b/examples/chat-api-client.js
new file mode 100644
index 0000000..7606260
--- /dev/null
+++ b/examples/chat-api-client.js
@@ -0,0 +1,49 @@
+// This is a simple client for dllama-api.
+//
+// Usage:
+//
+// 1. 
Start the server, how to do it is described in the `src/apps/dllama-api/README.md` file. +// 2. Run this script: `node examples/chat-api-client.js` + +const HOST = process.env.HOST ? process.env.HOST : '127.0.0.1'; +const PORT = process.env.PORT ? Number(process.env.PORT) : 9990; + +async function chat(messages, maxTokens) { + const response = await fetch(`http://${HOST}:${PORT}/v1/chat/completions`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + messages, + temperature: 0.7, + stop: ['<|eot_id|>'], + max_tokens: maxTokens + }), + }); + return await response.json(); +} + +async function ask(system, user, maxTokens) { + console.log(`> system: ${system}`); + console.log(`> user: ${user}`); + const response = await chat([ + { + role: 'system', + content: system + }, + { + role: 'user', + content: user + } + ], maxTokens); + console.log(response.usage); + console.log(response.choices[0].message.content); +} + +async function main() { + await ask('You are an excellent math teacher.', 'What is 1 + 2?', 128); + await ask('You are a romantic.', 'Where is Europe?', 128); +} + +main(); diff --git a/examples/macbeth.sh b/examples/macbeth.sh new file mode 100644 index 0000000..e549eb1 --- /dev/null +++ b/examples/macbeth.sh @@ -0,0 +1,200 @@ +#!/bin/bash + +# This is a simple test of generating a sequence that fulfills the KV cache. +# +# Used model & tokenizer: https://huggingface.co/b4rtaz/llama-3-8b-distributed-llama +# Probably, this test will be working correctly only on MacBook Pro M1, due to differences in float multiplication on different CPUs. + +cd "$(dirname "$0")" +cd .. + +# Source: https://www.opensourceshakespeare.org/views/plays/play_view.php?WorkID=macbeth&Scope=entire +PROMPT="Duncan. What bloody man is that? He can report, +As seemeth by his plight, of the revolt +The newest state. 20 + +Malcolm. This is the sergeant +Who like a good and hardy soldier fought +'Gainst my captivity. Hail, brave friend! +Say to the king the knowledge of the broil +As thou didst leave it. 25 + +Sergeant. Doubtful it stood; +As two spent swimmers, that do cling together +And choke their art. The merciless Macdonwaldโ€” +Worthy to be a rebel, for to that +The multiplying villanies of nature 30 +Do swarm upon himโ€”from the western isles +Of kerns and gallowglasses is supplied; +And fortune, on his damned quarrel smiling, +Show'd like a rebel's whore: but all's too weak: +For brave Macbethโ€”well he deserves that nameโ€” 35 +Disdaining fortune, with his brandish'd steel, +Which smoked with bloody execution, +Like valour's minion carved out his passage +Till he faced the slave; +Which ne'er shook hands, nor bade farewell to him, 40 +Till he unseam'd him from the nave to the chaps, +And fix'd his head upon our battlements. + +Duncan. O valiant cousin! worthy gentleman! + +Sergeant. As whence the sun 'gins his reflection +Shipwrecking storms and direful thunders break, 45 +So from that spring whence comfort seem'd to come +Discomfort swells. Mark, king of Scotland, mark: +No sooner justice had with valour arm'd +Compell'd these skipping kerns to trust their heels, +But the Norweyan lord surveying vantage, 50 +With furbish'd arms and new supplies of men +Began a fresh assault. + +Duncan. Dismay'd not this +Our captains, Macbeth and Banquo? + +Sergeant. Yes; 55 +As sparrows eagles, or the hare the lion. 
+If I say sooth, I must report they were +As cannons overcharged with double cracks, so they +Doubly redoubled strokes upon the foe: +Except they meant to bathe in reeking wounds, 60 +Or memorise another Golgotha, +I cannot tell. +But I am faint, my gashes cry for help. + +Duncan. So well thy words become thee as thy wounds; +They smack of honour both. Go get him surgeons. 65 +[Exit Sergeant, attended] +Who comes here?" + +GENERATED="Malcolm. The worthy Thane of Ross. +Duncan. What a haste looks through a duel's wounds! 70 +Some must be pac'd. +[Exit Ross] +See this encounter is like to the poring +On of a beggar's story, told by one +That means to pluck upon the heart the strings +And draw the tears thriftily. 75 +[Enter Lennox] +How goes the night, boy? + +Lennox. The night is long that none should wake. + +Duncan. You do not need to stare. The Moor +To know the man. 'Tis the Moors devices. 80 +[Exit Lennox] +By the happy right of mine own hands, +Strike all that live in this poor thing of mine. +'Tis calld the Eyrie, and I am sick at heart. +As hellish-devils do the damned souls +O'their bad lives, thus ill-breveted, linger +O'er lamps and forks and other instruments +That prove the stages of the night. 90 +Good sir, take note; I bid you farewell: +Come sleep, and cut short this nitty romance. +[He sleeps.] +If cravens, I bear them like the Minion of the moon, +With tiptoe foot he sneaks and starts to be a man. 95 +And when he is found asleep, awake him with this armed' s address: +That sleep which th'assassin hallowed, +Scotland, awake; your king is murder'd, sleep no more. 100 +*Furbish'd. Weapons polished for battle. +*Thriftily. Fastidiously, thoughtfully. +*Eyrie. Fortress; the lair of birds of prey. +*Minion. A braggart, a coward. + +1.5 + +Macbeth. So foul and fair a day I have not seen. 5 +Ross. Good morning, noble Macbeth. I come from Inverness, +And find our throne void, the arm'd rest you; 10 +My Lord of Cassil has resigned his life. +Macbeth. Whate'er you owe, in time repay, fair friends. +Note you the words; I pray you do. +Ross. I am your faithful servant, and will keep +My sworn reward upon your life; my lord. +Macbeth. You shall be well rewarded; stay the press, 20 +And I'll not fail. How now, good fellow? +Servant. Sir, his schoolmaster. 25 +Macbeth. Well, good, though, old. +Tell me, good fellow, how goes the night? 30 +Servant. There's marrygold and fire in your veins, my lord. +Macbeth. He does commend you; the weight of this old night's embargoes 35 +Did one hour's waste of time lay upon him. +I know when we are too safe, 'tis dangerous to be secure; +Therefore our fearful parts do brave the danger 40 +Which knows it not. I see you are a gentleman. +And a laudable one too; I am most off obliged. +Servant. I should be sorry, my good lord, to have had the labour 45 +To outlive this damned hour. 50 +Macbeth. What's done cannot be undone. To bed, to bed, to bed. +Servant. Will it please you to lie still? 55 +Macbeth. Lord, lord, my heart is in my mouth. All's true that ends well. +Servant. I thank you, fair, and leave you to the content. 60 +Macbeth. You see, my lord, it smokes, and shows no cause +Why the drone dies. 65 +Servant. Grief fills the room up of one vast stair, +And downs our vaults to the inconstant man above. 70 +Macbeth. Go bid thy masters and thy mistress say, 75 +I have power in earth to do so much. +There's comfort yet. They are assailable. Then say I, +Thus ye may answer. +Servant. He cannot be wronged; or being wronged, 80 +I cannot help him. 
85 +Macbeth. You know but by this; as this, 90 +The Jew foole is hang'd. 95 +Servant. No more today, my lord. 100 +Macbeth. He does shame to tell him he loves him, but not remove him 105 +From his true place; no. +Servant. That's true, and now I remember the story 110 +Of that sign in Leo four diurnal courses +Returning in a constant motion were within 115 +A boare that had on Taurus' back tetracted; 120 +Or neuer, or but once in modulated accidence. 125 +Macbeth. Thou climd'st alone, ty'd to the stag's horn. +Servant. I was a bull, for this the goodly year. 130 +Come, put me in my place. +Macbeth. Now go to sleep. 135 +Servant. The west neuer sett before the equinox 140 +Till now; and sunnes look'd not theyr frequencie 145 +Upon our lappe till now, my lord. 150 +Macbeth. This game of chance you term a gong. +Servant. A gong is a scotch word for an egg. 155 +Macbeth. Peace, be still. 160 +Servant. I coniecture I smell the blood of an Englishman. 165 +Macbeth. The faith is murthered. +Servant. That murder'd in his sleep. 170 +Macbeth. And sleeping murdered. 175 +Servant. In the fair queen heere in his royal court. 180 +Macbeth. So great a mercy that it may last eternally. +Servant. The earth hath bubbles as the water hath, 185 +And these are of them. Whate'er we will do 190 +To mend the trespasses of the comming time 195 +Shall be the seedes of new mischefe, and shall beget 200 +The formes of the extinctnese, which we are now. 205 +Macbeth. We have scorch'd the snake, not kill'd it. 210 +Servant. They hunt it in the morn. Good gally, good lord! 215 +It weares a gilded snout. 220 +Macbeth. It is the very painting of your fear. 225 +Servant. This is the worst. 230 +Macbeth. A fair quater of a mile is yet to go. 235 +Servant. A mile and half. 240 +Macbeth. I have run fifteen miles to-day. +Servant. A calender's date. +Macbeth. A bigger patch, a bigger patch. 245 +Servant. Thirteen of more. 250 +Macbeth. Wast thou with him? 255 +Servant. No, nor he to night. 260 +Macbeth. Thou seest the moon" + +echo "Generating, it can take a while..." + +OUTPUT=$(( ./dllama generate --seed 12345 --temperature 0.9 --topp 0.9 --prompt "$PROMPT" --weights-float-type q40 --buffer-float-type f32 --nthreads 2 --steps 2048 --model models/llama3_8b_q40/dllama_model_llama3_8b_q40.m --tokenizer models/llama3_8b_q40/dllama_tokenizer_llama3_8b_q40.t --workers 127.0.0.1:9999 127.0.0.1:9998 127.0.0.1:9997 ) 2>&1) + +echo "$OUTPUT" + +if [[ $OUTPUT == *"$GENERATED"* ]]; then + echo "โœ… Output is same" +else + echo "โŒ Output is different" +fi diff --git a/examples/n-workers.sh b/examples/n-workers.sh new file mode 100644 index 0000000..bd27ee8 --- /dev/null +++ b/examples/n-workers.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# This script starts N workers from a single command. Mainly useful for testing and debugging. +# Usage: +# +# W=7 T=2 bash n-workers.sh start +# W=7 bash n-workers.sh stop +# +# Env vars: +# W - n workers +# T - n threads per worker + +cd "$(dirname "$0")" + +if [ -z "$W" ]; then + W=3 +fi +if [ -z "$T" ]; then + T=1 +fi + +if [ "$1" == "start" ]; then + for (( w = 0; w < $W ; w += 1 )); + do + PORT=$(expr 9999 - $w) + PROC_ID=$(lsof -ti:$PORT) + if [ -n "$PROC_ID" ]; then + kill -9 $PROC_ID + echo "Killed process $PROC_ID" + fi + + mkdir -p dllama_worker_$w # macOs does not support -Logfile argument, so we place logs inside different directories + cd dllama_worker_$w + screen -d -L -S dllama_worker_$w -m ../../dllama worker --port $PORT --nthreads $T + cd .. 
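+        # Note (added for clarity): each worker gets its own port counting down
+        # from 9999 (PORT = 9999 - w) and runs in its own directory, so the
+        # per-worker screen log files do not collide.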
+ echo "Started worker $w on port $PORT" + done + + sleep 2 +elif [ "$1" == "stop" ]; then + for (( w = 0; w < $W ; w += 1 )); + do + screen -S dllama_worker_$w -X quit + done + + echo "Stopped $W workers" +else + echo "Usage: $0 [start|stop]" +fi + +echo "> screen -ls" +screen -ls diff --git a/launch.py b/launch.py new file mode 100644 index 0000000..f10f672 --- /dev/null +++ b/launch.py @@ -0,0 +1,195 @@ +import os +import sys +import time +import socket +import multiprocessing +from urllib.request import urlopen + +def parts(length): + result = [] + for i in range(length): + a = chr(97 + (i // 26)) + b = chr(97 + (i % 26)) + result.append(a + b) + return result + +# [['model-url-0', 'model-url-1', ...], 'tokenizer-url', 'weights-float-type', 'buffer-float-type', 'model-type'] +MODELS = { + 'llama3_1_8b_instruct_q40': [ + ['https://huggingface.co/b4rtaz/Llama-3_1-8B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_model_llama3.1_instruct_q40.m?download=true'], + 'https://huggingface.co/b4rtaz/Llama-3_1-8B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_tokenizer_llama_3_1.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'llama3_1_405b_instruct_q40': [ + list(map(lambda suffix : f'https://huggingface.co/b4rtaz/Llama-3_1-405B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_model_llama31_405b_q40_{suffix}?download=true', parts(56))), + 'https://huggingface.co/b4rtaz/Llama-3_1-405B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_tokenizer_llama_3_1.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'llama3_2_1b_instruct_q40': [ + ['https://huggingface.co/b4rtaz/Llama-3_2-1B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_model_llama3.2-1b-instruct_q40.m?download=true'], + 'https://huggingface.co/b4rtaz/Llama-3_2-1B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_tokenizer_llama3_2.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'llama3_2_3b_instruct_q40': [ + ['https://huggingface.co/b4rtaz/Llama-3_2-3B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_model_llama3.2-3b-instruct_q40.m?download=true'], + 'https://huggingface.co/b4rtaz/Llama-3_2-3B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_tokenizer_llama3_2.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'llama3_3_70b_instruct_q40': [ + list(map(lambda suffix : f'https://huggingface.co/b4rtaz/Llama-3_3-70B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_model_llama-3.3-70b_q40{suffix}?download=true', parts(11))), + 'https://huggingface.co/b4rtaz/Llama-3_3-70B-Q40-Instruct-Distributed-Llama/resolve/main/dllama_tokenizer_llama-3.3-70b.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'deepseek_r1_distill_llama_8b_q40': [ + ['https://huggingface.co/b4rtaz/DeepSeek-R1-Distill-Llama-8B-Distributed-Llama/resolve/main/dllama_model_deepseek-r1-distill-llama-8b_q40.m?download=true'], + 'https://huggingface.co/b4rtaz/DeepSeek-R1-Distill-Llama-8B-Distributed-Llama/resolve/main/dllama_tokenizer_deepseek-r1-distill-llama-8b.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'qwen3_0.6b_q40': [ + ['https://huggingface.co/b4rtaz/Qwen3-0.6B-Q40-Distributed-Llama/resolve/main/dllama_model_qwen3_0.6b_q40.m?download=true'], + 'https://huggingface.co/b4rtaz/Qwen3-0.6B-Q40-Distributed-Llama/resolve/main/dllama_tokenizer_qwen3_0.6b.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'qwen3_1.7b_q40': [ + 
['https://huggingface.co/b4rtaz/Qwen3-1.7B-Q40-Distributed-Llama/resolve/main/dllama_model_qwen3_1.7b_q40.m?download=true'], + 'https://huggingface.co/b4rtaz/Qwen3-1.7B-Q40-Distributed-Llama/resolve/main/dllama_tokenizer_qwen3_1.7b.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'qwen3_8b_q40': [ + ['https://huggingface.co/b4rtaz/Qwen3-8B-Q40-Distributed-Llama/resolve/main/dllama_model_qwen3_8b_q40.m?download=true'], + 'https://huggingface.co/b4rtaz/Qwen3-8B-Q40-Distributed-Llama/resolve/main/dllama_tokenizer_qwen3_8b.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'qwen3_14b_q40': [ + list(map(lambda suffix : f'https://huggingface.co/b4rtaz/Qwen3-14B-Q40-Distributed-Llama/resolve/main/dllama_model_qwen3_14b_q40_{suffix}?download=true', parts(2))), + 'https://huggingface.co/b4rtaz/Qwen3-14B-Q40-Distributed-Llama/resolve/main/dllama_tokenizer_qwen3_14b.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], + 'qwen3_30b_a3b_q40': [ + list(map(lambda suffix : f'https://huggingface.co/b4rtaz/Qwen3-30B-A3B-Q40-Distributed-Llama/resolve/main/dllama_model_qwen3_30b_a3b_{suffix}?download=true', parts(5))), + 'https://huggingface.co/b4rtaz/Qwen3-30B-A3B-Q40-Distributed-Llama/resolve/main/dllama_tokenizer_qwen3_30b_a3b.t?download=true', + 'q40', 'q80', 'chat', '--max-seq-len 4096' + ], +} + +def confirm(message: str): + alwaysYes = sys.argv.count('-y') > 0 + if alwaysYes: + return True + result = input(f'โ“ {message} ("Y" if yes): ').upper() + return result == 'Y' or result == 'YES' + +def downloadFile(urls, path: str): + if os.path.isfile(path): + fileName = os.path.basename(path) + if not confirm(f'{fileName} already exists, do you want to download again?'): + return + + socket.setdefaulttimeout(30) + lastSizeMb = 0 + with open(path, 'wb') as file: + for url in urls: + startPosition = file.tell() + success = False + for attempt in range(8): + print(f'๐Ÿ“„ {url} (attempt: {attempt})') + try: + with urlopen(url) as response: + while True: + chunk = response.read(4096) + if not chunk: + break + file.write(chunk) + sizeMb = file.tell() // (1024 * 1024) + if sizeMb != lastSizeMb: + sys.stdout.write("\rDownloaded %i MB" % sizeMb) + lastSizeMb = sizeMb + sys.stdout.write('\n') + success = True + break + except Exception as e: + print(f'\nโŒ Error downloading {url}: {e}') + file.seek(startPosition) + file.truncate() + time.sleep(1 * attempt) + if not success: + raise Exception(f'Failed to download {url}') + sys.stdout.write(' โœ…\n') + +def download(modelName: str, model: list): + dirPath = os.path.join('models', modelName) + print(f'๐Ÿ“€ Downloading {modelName} to {dirPath}...') + os.makedirs(dirPath, exist_ok=True) + modelUrls = model[0] + tokenizerUrl = model[1] + modelPath = os.path.join(dirPath, f'dllama_model_{modelName}.m') + tokenizerPath = os.path.join(dirPath, f'dllama_tokenizer_{modelName}.t') + downloadFile(modelUrls, modelPath) + downloadFile([tokenizerUrl], tokenizerPath) + print('๐Ÿ“€ All files are downloaded') + return (modelPath, tokenizerPath) + +def writeRunFile(modelName: str, command: str): + filePath = f'run_{modelName}.sh' + with open(filePath, 'w') as file: + file.write('#!/bin/sh\n') + file.write('\n') + file.write(f'{command}\n') + return filePath + +def printUsage(): + print('Usage: python download-model.py ') + print() + print('Options:') + print(' The name of the model to download') + print(' -skip-run Do not run the model after download') + print(' -skip-script Do not create a script to run the model') + print(' -y 
Skip confirmation prompts') + print() + print('Available models:') + for model in MODELS: + print(f' {model}') + +if __name__ == '__main__': + if (len(sys.argv) < 2): + printUsage() + exit(1) + + os.chdir(os.path.dirname(__file__)) + + modelName = sys.argv[1].replace('-', '_') + if modelName not in MODELS: + print(f'Model is not supported: {modelName}') + exit(1) + + model = MODELS[modelName] + (modelPath, tokenizerPath) = download(modelName, model) + + nThreads = multiprocessing.cpu_count() + if (model[4] == 'chat'): + command = './dllama chat' + else: + command = './dllama inference --steps 64 --prompt "Hello world"' + command += f' --model {modelPath} --tokenizer {tokenizerPath} --buffer-float-type {model[3]} --nthreads {nThreads}' + if (len(model) > 5): + command += f' {model[5]}' + + print('To run Distributed Llama you need to execute:') + print('--- copy start ---') + print() + print('\033[96m' + command + '\033[0m') + print() + print('--- copy end -----') + + skipRun = sys.argv.count('-skip-run') > 0 + skipScript = sys.argv.count('-skip-script') > 0 + + if (not skipScript): + runFilePath = writeRunFile(modelName, command) + print(f'๐ŸŒป Created {runFilePath} script to easy run') + + if (not skipRun): + if (confirm('Do you want to run Distributed Llama?')): + if (not os.path.isfile('dllama')): + os.system('make dllama') + os.system(command) diff --git a/report/report.pdf b/report/report.pdf new file mode 100644 index 0000000..d8552e7 Binary files /dev/null and b/report/report.pdf differ diff --git a/src/api-types.hpp b/src/api-types.hpp new file mode 100755 index 0000000..b417bca --- /dev/null +++ b/src/api-types.hpp @@ -0,0 +1,179 @@ +#ifndef API_TYPES_HPP +#define API_TYPES_HPP + +#include + +#include "json.hpp" + +using json = nlohmann::json; + +struct ChatMessageDelta { + std::string role; + std::string content; + + ChatMessageDelta() : role(""), content("") {} + ChatMessageDelta(const std::string& role_, const std::string& content_) : role(role_), content(content_) {} +}; + +struct ChatMessage { + std::string role; + std::string content; + + ChatMessage() : role(""), content("") {} + ChatMessage(const std::string& role_, const std::string& content_) : role(role_), content(content_) {} +}; + +struct ChunkChoice { + int index; + ChatMessageDelta delta; + std::string finish_reason; + + ChunkChoice() : index(0) {} +}; + + +struct Choice { + int index; + ChatMessage message; + std::string finish_reason; + + Choice() : finish_reason("") {} + Choice(ChatMessage &message_) : message(message_), finish_reason("") {} + Choice(const std::string &reason_) : finish_reason(reason_) {} +}; + +struct ChatCompletionChunk { + std::string id; + std::string object; + long long created; + std::string model; + std::vector choices; + + ChatCompletionChunk(ChunkChoice &choice_) + : id("cmpl-c0"), object("chat.completion"), model("Distributed Model") { + created = std::time(nullptr); // Set created to current Unix timestamp + choices.push_back(choice_); + } +}; + +// Struct to represent the usage object +struct ChatUsage { + int prompt_tokens; + int completion_tokens; + int total_tokens; + + ChatUsage() : prompt_tokens(0), completion_tokens(0), total_tokens(0) {} + ChatUsage(int pt, int ct, int tt) : prompt_tokens(pt), completion_tokens(ct), total_tokens(tt) {} +}; + +struct ChatCompletion { + std::string id; + std::string object; + long long created; // Unix timestamp + std::string model; + std::vector choices; + ChatUsage usage; + + ChatCompletion() : id(), object(), model() {} + ChatCompletion(const 
Choice &choice_, const ChatUsage& usage_) + : id("cmpl-j0"), object("chat.completion"), model("Distributed Model"), usage(usage_) { + created = std::time(nullptr); // Set created to current Unix timestamp + choices.push_back(choice_); + } +}; + +struct Model { + std::string id; + std::string object; + long long created; + std::string owned_by; + + Model() : id(), object(), created(0), owned_by() {} + Model(const std::string &id_) : id(id_), object("model"), created(0), owned_by("user") {} +}; + +struct ModelList { + std::string object; + std::vector data; + ModelList(): object("list") {} + ModelList(const Model &model_) : object("list") { + data.push_back(model_); + } +}; + +struct InferenceParams { + std::vector messages; + int max_tokens; + float temperature; + float top_p; + std::vector stop; + bool stream; + unsigned long long seed; +}; + +// Define to_json for Delta struct +void to_json(json& j, const ChatMessageDelta& msg) { + j = json{{"role", msg.role}, {"content", msg.content}}; +} + +void to_json(json& j, const ChatMessage& msg) { + j = json{{"role", msg.role}, {"content", msg.content}}; +} + +void to_json(json& j, const ChunkChoice& choice) { + j = json{{"index", choice.index}, {"delta", choice.delta}, {"finish_reason", choice.finish_reason}}; +} + +void to_json(json& j, const Choice& choice) { + j = json{{"index", choice.index}, {"message", choice.message}, {"finish_reason", choice.finish_reason}}; +} + +void to_json(json& j, const ChatCompletionChunk& completion) { + j = json{{"id", completion.id}, + {"object", completion.object}, + {"created", completion.created}, + {"model", completion.model}, + {"choices", completion.choices}}; +} + +void to_json(json& j, const ChatUsage& usage) { + j = json{{"completion_tokens", usage.completion_tokens}, + {"prompt_tokens", usage.prompt_tokens}, + {"total_tokens", usage.total_tokens}}; +} + +void to_json(json& j, const ChatCompletion& completion) { + j = json{{"id", completion.id}, + {"object", completion.object}, + {"created", completion.created}, + {"model", completion.model}, + {"usage", completion.usage}, + {"choices", completion.choices}}; +} + +void to_json(json& j, const Model& model) { + j = json{{"id", model.id}, + {"object", model.object}, + {"created", model.created}, + {"owned_by", model.owned_by}}; +} + +void to_json(json& j, const ModelList& models) { + j = json{{"object", models.object}, + {"data", models.data}}; +} + +std::vector parseChatMessages(json &json){ + std::vector messages; + messages.reserve(json.size()); + + for (const auto& item : json) { + messages.emplace_back( + item["role"].template get(), + item["content"].template get() + ); + } + return messages; +} + +#endif diff --git a/src/app.cpp b/src/app.cpp new file mode 100644 index 0000000..3cf858d --- /dev/null +++ b/src/app.cpp @@ -0,0 +1,358 @@ +#include "app.hpp" +#include +#include +#include +#if defined(DLLAMA_VULKAN) + #include "nn/nn-vulkan.hpp" +#endif + +static NnFloatType parseFloatType(char *val) { + if (std::strcmp(val, "f32") == 0) return F_32; + if (std::strcmp(val, "f16") == 0) return F_16; + if (std::strcmp(val, "q40") == 0) return F_Q40; + if (std::strcmp(val, "q80") == 0) return F_Q80; + throw std::runtime_error("Invalid float type: " + std::string(val)); +} + +static ChatTemplateType parseChatTemplateType(char *val) { + if (std::strcmp(val, "llama2") == 0) return TEMPLATE_LLAMA2; + if (std::strcmp(val, "llama3") == 0) return TEMPLATE_LLAMA3; + if (std::strcmp(val, "deepSeek3") == 0) return TEMPLATE_DEEP_SEEK3; + throw 
std::runtime_error("Invalid chat template type: " + std::string(val)); +} + +AppCliArgs AppCliArgs::parse(int argc, char* *argv, bool requireMode) { + AppCliArgs args; + args.help = false; + args.mode = nullptr; + args.nBatches = 32; + args.nThreads = 1; + args.modelPath = nullptr; + args.tokenizerPath = nullptr; + args.prompt = nullptr; + args.syncType = F_32; + args.nWorkers = 0; + args.workerHosts = nullptr; + args.workerPorts = nullptr; + args.port = 9990; + args.temperature = 0.8f; + args.topp = 0.9f; + args.steps = 0; + args.seed = (unsigned long long)time(nullptr); + args.chatTemplateType = TEMPLATE_UNKNOWN; + args.maxSeqLen = 0; + args.netTurbo = true; + args.gpuIndex = -1; + args.gpuSegmentFrom = -1; + args.gpuSegmentTo = -1; + + int i = 1; + if (requireMode && argc > 1) { + args.mode = argv[1]; + i++; + } + // First see if any of the args are asking for help/usage and fail fast + for (int x = 0; x < argc; x++) { + if ((std::strcmp(argv[x], "--usage") == 0) || + (std::strcmp(argv[x], "--help") == 0) || + (std::strcmp(argv[x], "-h") == 0)) { + args.help = true; + return args; + } + } + for (; i + 1 < argc; i += 2) { + char *name = argv[i]; + char *value = argv[i + 1]; + if (std::strcmp(name, "--model") == 0) { + args.modelPath = value; + } else if (std::strcmp(name, "--tokenizer") == 0) { + args.tokenizerPath = value; + } else if (std::strcmp(name, "--prompt") == 0) { + args.prompt = value; + } else if (std::strcmp(name, "--buffer-float-type") == 0) { + args.syncType = parseFloatType(value); + } else if (std::strcmp(name, "--workers") == 0) { + int j = i + 1; + for (; j < argc && argv[j][0] != '-'; j++); + int count = j - i - 1; + + args.nWorkers = count; + args.workerHosts = new char*[count]; + args.workerPorts = new NnUint[count]; + + for (int s = 0; s < count; s++) { + char *v = argv[i + 1 + s]; + char *separator = std::strstr(v, ":"); + if (separator == NULL) { + throw std::runtime_error("Invalid worker address: " + std::string(v)); + } + int hostLen = separator - v; + args.workerHosts[s] = new char[hostLen + 1]; + std::memcpy(args.workerHosts[s], v, hostLen); + args.workerHosts[s][hostLen] = '\0'; + args.workerPorts[s] = std::atoi(separator + 1); + } + + i += count - 1; + } else if (std::strcmp(name, "--port") == 0) { + args.port = atoi(value); + } else if (std::strcmp(name, "--nthreads") == 0) { + args.nThreads = atoi(value); + } else if (std::strcmp(name, "--steps") == 0) { + args.steps = atoi(value); + } else if (std::strcmp(name, "--temperature") == 0) { + args.temperature = atof(value); + } else if (std::strcmp(name, "--topp") == 0) { + args.topp = atof(value); + } else if (std::strcmp(name, "--seed") == 0) { + args.seed = atoll(value); + } else if (std::strcmp(name, "--chat-template") == 0) { + args.chatTemplateType = parseChatTemplateType(value); + } else if (std::strcmp(name, "--max-seq-len") == 0) { + args.maxSeqLen = (unsigned int)atoi(value); + } else if (std::strcmp(name, "--gpu-index") == 0) { + args.gpuIndex = atoi(value); + } else if (std::strcmp(name, "--gpu-segments") == 0) { + char *separator = std::strstr(value, ":"); + if (separator == NULL) + throw std::runtime_error("GPU segments expected in the format :"); + args.gpuSegmentFrom = atoi(value); + args.gpuSegmentTo = atoi(separator + 1); + } else if (std::strcmp(name, "--net-turbo") == 0) { + args.netTurbo = atoi(value) == 1; + } else { + throw std::runtime_error("Unknown option: " + std::string(name)); + } + } + + if (args.nThreads < 1) + throw std::runtime_error("Number of threads must be at least 1"); + 
return args; +} + +AppCliArgs::~AppCliArgs() { + if (workerHosts != nullptr) { + for (NnUint i = 0; i < nWorkers; i++) + delete[] workerHosts[i]; + delete[] workerHosts; + } + if (workerPorts != nullptr) + delete[] workerPorts; +} + +static std::vector resolveDevices(AppCliArgs *args, NnNetConfig *netConfig, NnNodeConfig *nodeConfig, NnNetExecution *netExecution) { + std::vector devices; + + if (args->gpuIndex >= 0) { +#if defined(DLLAMA_VULKAN) + devices.push_back(NnExecutorDevice( + new NnVulkanDevice(args->gpuIndex, netConfig, nodeConfig, netExecution), + args->gpuSegmentFrom, + args->gpuSegmentTo + )); +#else + throw std::runtime_error("This build does not support GPU"); +#endif + } + + if (args->gpuIndex < 0 || (args->gpuSegmentFrom >= 0 && args->gpuSegmentTo >= 0)) { + devices.push_back(NnExecutorDevice(new NnCpuDevice(netConfig, nodeConfig, netExecution), -1, -1)); + } + return devices; +} + +RootLlmInference::RootLlmInference(LlmNet *net, NnNetExecution *execution, NnExecutor *executor, NnNetwork *network) { + this->header = net->header; + this->tokenPipe = (float *)execution->pipes[net->tokenPipeIndex]; + this->positionPipe = (float *)execution->pipes[net->positionPipeIndex]; + this->logitsPipe = (float *)execution->pipes[net->logitsPipeIndex]; + this->execution = execution; + this->executor = executor; + this->network = network; // May be nullptr! +} + +void RootLlmInference::setBatchSize(NnUint batchSize) { + execution->setBatchSize(batchSize); + controlPacket.batchSize = batchSize; +} + +void RootLlmInference::setPosition(NnUint position) { + assert(position >= 0); + assert(position + execution->batchSize - 1 < header->seqLen); + + controlPacket.position = position; + for (NnUint i = 0; i < execution->batchSize; i++) + positionPipe[i] = (float)(position + i); +} + +void RootLlmInference::setToken(NnUint batchIndex, NnUint token) { + assert(batchIndex >= 0 && batchIndex < execution->batchSize); + tokenPipe[batchIndex] = (float)token; +} + +void RootLlmInference::forward() { + if (network != nullptr) + network->writeAll(&controlPacket, sizeof(LlmControlPacket)); + executor->forward(); +} + +void RootLlmInference::finish() { + if (network != nullptr) { + controlPacket.batchSize = 0; + network->writeAll(&controlPacket, sizeof(LlmControlPacket)); + } +} + +WorkerLlmInference::WorkerLlmInference(NnNetExecution *execution, NnNetwork *network) { + this->isFinished = false; + this->execution = execution; + this->network = network; + this->positionPipe = (float *)execution->pipes[0]; +} + +bool WorkerLlmInference::tryReadControlPacket() { + const unsigned long maxAttempts = 10000; + if (!network->tryReadWithMaxAttempts(ROOT_SOCKET_INDEX, &controlPacket, sizeof(LlmControlPacket), maxAttempts)) + return false; + if (controlPacket.batchSize == 0) { + printf("๐Ÿ›‘ Stop signal\n"); + isFinished = true; + return true; + } + for (NnUint i = 0; i < controlPacket.batchSize; i++) + positionPipe[i] = (float)(controlPacket.position + i); + execution->setBatchSize(controlPacket.batchSize); + return true; +} + +void runInferenceApp(AppCliArgs *args, void (*handler)(AppInferenceContext *context)) { + NnUint nNodes = args->nWorkers + 1; + + LlmHeader header = loadLlmHeader(args->modelPath, args->maxSeqLen, args->syncType); + if (nNodes > header.nKvHeads) + // TODO: https://github.com/b4rtaz/distributed-llama/issues/70 + throw std::runtime_error("This version does not support more nodes than the number of KV heads in the model"); + if (header.weightType == F_Q40 && header.syncType != F_Q80) + throw 
std::runtime_error("This version supports only Q40 weights with Q80 sync type"); + + Tokenizer tokenizer(args->tokenizerPath); + if (tokenizer.vocabSize != header.vocabSize) + printf("Tokenizer vocab size (%d) does not match the model vocab size (%d)\n", tokenizer.vocabSize, header.vocabSize); + + Sampler sampler(tokenizer.vocabSize, args->temperature, args->topp, args->seed); + + LlmNet net = buildLlmNet(&header, nNodes, args->nBatches); + std::unique_ptr netPtr(&net, releaseLlmNet); + + NnNodeConfig *rootNodeConfig = &net.nodeConfigs[0]; + + printLlmHeader(&header); + printNodeRequiredMemory(&net.netConfig, rootNodeConfig); + + NnNetExecution execution(args->nThreads, &net.netConfig); + + std::unique_ptr synchronizer(nullptr); + std::unique_ptr networkPtr(nullptr); + NnNetwork *network = nullptr; + + if (nNodes == 1) { + synchronizer.reset(new NnFakeNodeSynchronizer()); + } else { + networkPtr = NnNetwork::connect(args->nWorkers, args->workerHosts, args->workerPorts); + network = networkPtr.get(); + synchronizer.reset(new NnNetworkNodeSynchronizer(network, &execution, &net.netConfig, rootNodeConfig)); + + NnRootConfigWriter configWriter(network); + configWriter.writeToWorkers(&net.netConfig, net.nodeConfigs); + } + + std::vector devices = resolveDevices(args, &net.netConfig, rootNodeConfig, &execution); + NnExecutor executor(&net.netConfig, rootNodeConfig, &devices, &execution, synchronizer.get(), args->benchmark); + + NnRootWeightLoader weightLoader(&executor, network, nNodes); + loadLlmNetWeight(args->modelPath, &net, &weightLoader); + + RootLlmInference inference(&net, &execution, &executor, network); + + if (network != nullptr) { + network->resetStats(); + if (args->netTurbo) { + network->setTurbo(true); + printf("๐Ÿš Network is in non-blocking mode\n"); + } + } + + AppInferenceContext context; + context.args = args; + context.header = &header; + context.inference = &inference; + context.sampler = &sampler; + context.tokenizer = &tokenizer; + context.network = network; + context.executor = &executor; + + handler(&context); + + inference.finish(); +} + +void runWorkerApp(AppCliArgs *args) { + while (true) { + std::unique_ptr networkPtr = NnNetwork::serve(args->port); + NnNetwork *network = networkPtr.get(); + + NnWorkerConfigReader configReader(network); + NnNetConfig netConfig = configReader.readNet(); + NnNodeConfig nodeConfig = configReader.readNode(); + std::unique_ptr netConfigPtr(&netConfig, releaseNetConfig); + std::unique_ptr nodeConfigPtr(&nodeConfig, releaseNodeConfig); + + printNodeRequiredMemory(&netConfig, &nodeConfig); + + NnNetExecution execution(args->nThreads, &netConfig); + + std::vector devices = resolveDevices(args, &netConfig, &nodeConfig, &execution); + NnNetworkNodeSynchronizer synchronizer(network, &execution, &netConfig, &nodeConfig); + NnExecutor executor(&netConfig, &nodeConfig, &devices, &execution, &synchronizer, false); + + NnWorkerWeightReader weightReader(&executor, network); + weightReader.read(); + + WorkerLlmInference inference(&execution, network); + bool isFirstAttempt = true; + bool isTurboEnabled = false; + clock_t startTime; + while (true) { + try { + if (isFirstAttempt) + startTime = clock(); + + if (!inference.tryReadControlPacket()) { + if (isTurboEnabled && !isFirstAttempt && clock() - startTime > CLOCKS_PER_SEC) { + network->setTurbo(false); + isTurboEnabled = false; + printf("๐Ÿš Network is in blocking mode\n"); + } + isFirstAttempt = false; + continue; + } + if (inference.isFinished) + break; + + if (args->netTurbo && !isTurboEnabled) { 
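+                    // A control packet just arrived, so requests are flowing: switch the
+                    // socket into non-blocking (turbo) mode for low-latency reads. The
+                    // branch above reverts to blocking mode after ~1s of inactivity
+                    // (CLOCKS_PER_SEC) to avoid busy-waiting between requests.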
+ network->setTurbo(true); + isTurboEnabled = true; + printf("๐Ÿš Network is in non-blocking mode\n"); + } + executor.forward(); + isFirstAttempt = true; + } catch (const NnReadNetworkException &e) { + printf("Read network exception: %s\n", e.message); + break; + } catch (const NnWriteNetworkException &e) { + printf("Write network exception: %s\n", e.message); + break; + } + } + } +} diff --git a/src/app.hpp b/src/app.hpp new file mode 100644 index 0000000..7ed1429 --- /dev/null +++ b/src/app.hpp @@ -0,0 +1,95 @@ +#ifndef APP_HPP +#define APP_HPP + +#include +#include "nn/nn-core.hpp" +#include "nn/nn-cpu.hpp" +#include "tokenizer.hpp" +#include "llm.hpp" + +class AppCliArgs { +public: + char *mode; + NnUint nThreads; + NnUint nBatches; + bool help; + + // inference + char *modelPath; + char *tokenizerPath; + char *prompt; + NnFloatType syncType; + NnUint nWorkers; + char **workerHosts; + NnUint *workerPorts; + float temperature; + float topp; + NnUint steps; + bool benchmark; + unsigned long long seed; + ChatTemplateType chatTemplateType; + NnUint maxSeqLen; + bool netTurbo; + int gpuIndex; + int gpuSegmentFrom; + int gpuSegmentTo; + + // worker + NnUint port; + + static AppCliArgs parse(int argc, char **argv, bool hasMode); + ~AppCliArgs(); +}; + +typedef struct { + NnUint position; + NnUint batchSize; // 0 = stop signal +} LlmControlPacket; + +class RootLlmInference { +public: + float *logitsPipe; +private: + float *tokenPipe; + float *positionPipe; + LlmHeader *header; + NnNetExecution *execution; + NnExecutor *executor; + NnNetwork *network; + LlmControlPacket controlPacket; +public: + RootLlmInference(LlmNet *net, NnNetExecution *execution, NnExecutor *executor, NnNetwork *network); + void setBatchSize(NnUint batchSize); + void setPosition(NnUint position); + void setToken(NnUint batchIndex, NnUint token); + void forward(); + void finish(); +}; + +class WorkerLlmInference { +public: + bool isFinished; +private: + float *positionPipe; + NnNetExecution *execution; + NnNetwork *network; + LlmControlPacket controlPacket; +public: + WorkerLlmInference(NnNetExecution *execution, NnNetwork *network); + bool tryReadControlPacket(); +}; + +typedef struct { + AppCliArgs *args; + LlmHeader *header; + RootLlmInference *inference; + Tokenizer *tokenizer; + Sampler *sampler; + NnNetwork *network; + NnExecutor *executor; +} AppInferenceContext; + +void runInferenceApp(AppCliArgs *args, void (*handler)(AppInferenceContext *context)); +void runWorkerApp(AppCliArgs *args); + +#endif diff --git a/src/dllama-api.cpp b/src/dllama-api.cpp new file mode 100644 index 0000000..2f2a9f3 --- /dev/null +++ b/src/dllama-api.cpp @@ -0,0 +1,622 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#include +#else +#include +#include +#include +#endif + +#include "tokenizer.hpp" +#include "app.hpp" +#include "json.hpp" +#include "api-types.hpp" +#include "nn/nn-network.hpp" + +typedef unsigned int pos_t; + +using json = nlohmann::json; + +enum class HttpMethod { + METHOD_GET = 0, + METHOD_POST = 1, + METHOD_PUT = 2, + METHOD_DELETE = 3, + METHOD_OPTIONS = 4, + METHOD_UNKNOWN = 5 +}; + +class HttpRequest { +public: + static HttpRequest read(int serverSocket) { + HttpRequest req(serverSocket); + + std::vector httpRequest = req.readHttpRequest(); + // Parse the HTTP request + std::string data = std::string(httpRequest.begin(), httpRequest.end()); + + // Split request into lines + std::istringstream iss(data); + std::string line; + 
std::getline(iss, line); + + // Parse request line + std::istringstream lineStream(line); + std::string methodStr, path; + lineStream >> methodStr >> path; + req.method = parseMethod(methodStr); + req.path = path; + + // Parse headers + while (std::getline(iss, line) && line != "\r") { + size_t pos = line.find(':'); + if (pos != std::string::npos) { + std::string key = line.substr(0, pos); + std::string value = line.substr(pos + 2); // Skip ': ' after key + // Trim whitespace and non-printable characters from header value + value.erase(std::remove_if(value.begin(), value.end(), [](unsigned char c) { + return std::isspace(c) || !std::isprint(c); + }), value.end()); + req.headers[key] = value; + } + } + + // Parse body + std::getline(iss, req.body, '\0'); + + if (req.body.size() > 0) { + // printf("body: %s\n", req.body.c_str()); + req.parsedJson = json::parse(req.body); + } + return req; + } + + static HttpMethod parseMethod(const std::string& method) { + if (method == "GET") return HttpMethod::METHOD_GET; + if (method == "POST") return HttpMethod::METHOD_POST; + if (method == "PUT") return HttpMethod::METHOD_PUT; + if (method == "DELETE") return HttpMethod::METHOD_DELETE; + if (method == "OPTIONS") return HttpMethod::METHOD_OPTIONS; + return HttpMethod::METHOD_UNKNOWN; + } + +private: + int serverSocket; +public: + std::string path; + std::unordered_map headers; + std::string body; + json parsedJson; + HttpMethod method; + + HttpRequest(int serverSocket) { + this->serverSocket = serverSocket; + } + + std::vector readHttpRequest() { + std::string httpRequest; + char buffer[1024 * 64]; + ssize_t bytesRead; + + // First, read all headers + std::string headerData; + size_t headerEnd; + bool headerDone = false; + std::string extraReadPastHeader; + while (!headerDone) { + bytesRead = recv(serverSocket, buffer, sizeof(buffer) - 1, 0); + if (bytesRead <= 0) { + throw std::runtime_error("Error while reading headers from socket"); + } + buffer[bytesRead] = '\0'; + headerData.append(buffer); + + // Check for end of headers (http header says "\r\n\r\n") + headerEnd = headerData.find("\r\n\r\n"); + if (headerEnd != std::string::npos) { + headerDone = true; + if (headerEnd < headerData.size()-4) { + // We read something past the header + extraReadPastHeader = headerData.substr(headerEnd+4); + } + } + } + + httpRequest.append(headerData); + + // Next, find Content-Length header for body length + std::istringstream headerStream(headerData); + std::string line; + ssize_t contentLength = 0; + while (std::getline(headerStream, line) && line != "\r") { + size_t pos = line.find(':'); + if (pos != std::string::npos) { + std::string key = line.substr(0, pos); + std::string value = line.substr(pos + 2); // Skip ': ' after key + if (key == "Content-Length") { + try { + contentLength = std::stoi(value); // stoi ignores any whitespace + } catch (const std::invalid_argument& e) { + throw std::runtime_error("Bad Content-Length header - not a number"); + } + break; + } + } + } + + // Now read the full content body + if (contentLength > 0) { + // If we read any extra past the header before, read that much less now + // But first, sanity check to make sure Content-Length isn't lying and there is actually more + if (extraReadPastHeader.size() > static_cast(contentLength)) { + throw std::runtime_error("Received more body data than Content-Length header said"); + } + contentLength -= extraReadPastHeader.size(); + + std::vector body(contentLength); + ssize_t totalRead = 0; + while (totalRead < contentLength) { + bytesRead = 
recv(serverSocket, body.data() + totalRead, contentLength - totalRead, 0); + if (bytesRead <= 0) { + throw std::runtime_error("Error while reading body from socket"); + } + totalRead += bytesRead; + } + if (body.size() > 0) { + httpRequest.append(body.data(), contentLength); + } + } + + return std::vector(httpRequest.begin(), httpRequest.end()); + } + + std::string getMethod() { + if (method == HttpMethod::METHOD_GET) return "GET"; + if (method == HttpMethod::METHOD_POST) return "POST"; + if (method == HttpMethod::METHOD_PUT) return "PUT"; + if (method == HttpMethod::METHOD_DELETE) return "DELETE"; + if (method == HttpMethod::METHOD_OPTIONS) return "OPTIONS"; + return "UNKNOWN"; + } + + void writeCors() { + std::ostringstream buffer; + buffer << "HTTP/1.1 204 No Content\r\n" + << "Access-Control-Allow-Origin: *\r\n" + << "Access-Control-Allow-Methods: GET, POST, PUT, DELETE\r\n" + << "Access-Control-Allow-Headers: Content-Type, Authorization\r\n" + << "Connection: close\r\n" + << "\r\n"; + std::string data = buffer.str(); + writeSocket(serverSocket, data.c_str(), data.size()); + } + + void writeNotFound() { + std::ostringstream buffer; + buffer << "HTTP/1.1 404 Not Found\r\n" + << "Connection: close\r\n" + << "Content-Length: 9\r\n" + << "\r\n" + << "Not Found"; + std::string data = buffer.str(); + writeSocket(serverSocket, data.c_str(), data.size()); + } + + void writeJson(std::string json) { + std::ostringstream buffer; + buffer << "HTTP/1.1 200 OK\r\n" + << "Access-Control-Allow-Origin: *\r\n" + << "Content-Type: application/json; charset=utf-8\r\n" + << "Connection: close\r\n" + << "Content-Length: " << json.length() << "\r\n\r\n" << json; + std::string data = buffer.str(); + writeSocket(serverSocket, data.c_str(), data.size()); + } + + void writeStreamStartChunk() { + std::ostringstream buffer; + buffer << "HTTP/1.1 200 OK\r\n" + << "Access-Control-Allow-Origin: *\r\n" + << "Content-Type: text/event-stream; charset=utf-8\r\n" + << "Connection: close\r\n" + << "Transfer-Encoding: chunked\r\n\r\n"; + std::string data = buffer.str(); + writeSocket(serverSocket, data.c_str(), data.size()); + } + + void writeStreamChunk(const std::string data) { + std::ostringstream buffer; + buffer << std::hex << data.size() << "\r\n" << data << "\r\n"; + std::string d = buffer.str(); + writeSocket(serverSocket, d.c_str(), d.size()); + } + + void writeStreamEndChunk() { + const char *endChunk = "0000\r\n\r\n"; + writeSocket(serverSocket, endChunk, strlen(endChunk)); + } +}; + +struct Route { + std::string path; + HttpMethod method; + std::function handler; +}; + +class Router { +public: + static void resolve(HttpRequest& request, std::vector& routes) { + if (request.method == HttpMethod::METHOD_OPTIONS) { + request.writeCors(); + return; + } + for (const auto& route : routes) { + if (request.method == route.method && request.path == route.path) { + route.handler(request); + return; + } + } + request.writeNotFound(); + } +}; + +void writeChatCompletionChunk(HttpRequest &request, const std::string &delta, const bool stop){ + ChunkChoice choice; + if (stop) { + choice.finish_reason = "stop"; + } else { + choice.delta = ChatMessageDelta("assistant", delta); + } + ChatCompletionChunk chunk = ChatCompletionChunk(choice); + + std::ostringstream buffer; + buffer << "data: " << ((json)chunk).dump() << "\r\n\r\n"; + request.writeStreamChunk(buffer.str()); + + if (stop) { + request.writeStreamChunk("data: [DONE]"); + request.writeStreamEndChunk(); + } +} + +class NaiveCacheItem { +public: + pos_t endPos; + 
ChatMessage message; + NaiveCacheItem(pos_t endPos, ChatMessage message) { + this->endPos = endPos; + this->message = message; + } +}; + +class NaiveCache { +private: + std::vector cache; +public: + void push(NaiveCacheItem item) { + cache.push_back(item); + } + + void clear() { + cache.clear(); + } + + bool resolveDeltaPrompt(std::vector& messages, pos_t& startPos) { + size_t cacheSize = cache.size(); + if (cacheSize == 0) + return false; + if (messages.size() > cacheSize) { + size_t i = 0; + while (i < cacheSize) { + if ( + cache[i].message.role != messages[i].role || + cache[i].message.content != messages[i].content + ) break; + i++; + } + if (i == cacheSize) { + startPos = cache[i - 1].endPos; + messages.erase(messages.begin(), messages.begin() + i); + printf("๐Ÿค Found naive cache for %zu messages, pos=%d\n", i, startPos); + return true; + } + } + cache.clear(); + return false; + } +}; + +class ApiServer { +private: + RootLlmInference *inference; + Tokenizer *tokenizer; + Sampler *sampler; + AppCliArgs *args; + LlmHeader *header; + EosDetector *eosDetector; + ChatTemplateGenerator *templateGenerator; + NaiveCache naiveCache; + +public: + ApiServer(RootLlmInference *inference, Tokenizer *tokenizer, Sampler *sampler, AppCliArgs *args, LlmHeader *header, EosDetector *eosDetector, ChatTemplateGenerator *templateGenerator) { + this->inference = inference; + this->tokenizer = tokenizer; + this->sampler = sampler; + this->args = args; + this->header = header; + this->eosDetector = eosDetector; + this->templateGenerator = templateGenerator; + } + + void complete(HttpRequest& request) { + InferenceParams params = parseRequest(request); + + pos_t startPos = 0; + std::vector deltaPrompt = params.messages; + naiveCache.resolveDeltaPrompt(deltaPrompt, startPos); + + size_t nInputItems = deltaPrompt.size(); + std::unique_ptr inputItemsPtr(new ChatItem[nInputItems]); + ChatItem *inputItems = inputItemsPtr.get(); + for (size_t i = 0; i < nInputItems; i++) { + inputItems[i].role = deltaPrompt[i].role; + inputItems[i].message = deltaPrompt[i].content; + } + + GeneratedChat inputPrompt = templateGenerator->generate(nInputItems, inputItems, true); + printf("๐Ÿ”น%s๐Ÿ”ธ", inputPrompt.content); + + int nPromptTokens; + std::unique_ptr promptTokensPtr(new int[inputPrompt.length + 2]); + int *promptTokens = promptTokensPtr.get(); + bool isStart = startPos == 0; + tokenizer->encode((char*)inputPrompt.content, promptTokens, &nPromptTokens, isStart, true); + + pos_t promptEndPos = startPos + nPromptTokens - 1; + if (promptEndPos > header->seqLen) + promptEndPos = header->seqLen; + + pos_t maxPredPos = params.max_tokens > 0 ? (promptEndPos + params.max_tokens) : header->seqLen; + if (maxPredPos > header->seqLen) + maxPredPos = header->seqLen; + + for (size_t j = 0; j < deltaPrompt.size(); j++) { + naiveCache.push(NaiveCacheItem(promptEndPos, deltaPrompt[j])); + } + + std::string buffer; + + if (params.stream) + request.writeStreamStartChunk(); + if (inputPrompt.publicPrompt != nullptr) { + if (params.stream) + writeChatCompletionChunk(request, inputPrompt.publicPrompt, false); + buffer += inputPrompt.publicPrompt; + } + + NnUint pos = startPos; + int token; + for (NnUint i = 0; ;) { + long remainingTokens = promptEndPos - pos; + if (remainingTokens <= 0) + break; + + NnUint batchSize = remainingTokens < args->nBatches + ? 
remainingTokens + : args->nBatches; + + inference->setBatchSize(batchSize); + inference->setPosition(pos); + for (NnUint j = 0; j < batchSize; j++) + inference->setToken(j, promptTokens[i + j]); + + inference->forward(); + + i += batchSize; + pos += batchSize; + token = promptTokens[i + 1]; + } + + inference->setBatchSize(1); + tokenizer->resetDecoder(); + eosDetector->reset(); + + for (; pos < maxPredPos;) { + inference->setPosition(pos); + inference->setToken(0, token); + inference->forward(); + + token = sampler->sample(inference->logitsPipe); + + char *piece = tokenizer->decode(token); + EosDetectorType eosType = eosDetector->append(token, piece); + + if (piece != nullptr) { + printf("%s", piece); + fflush(stdout); + } + + if (eosType == NOT_EOS || eosType == EOS) { + char *delta = eosDetector->getDelta(); + if (delta != nullptr) { + std::string deltaStr(delta); + if (params.stream) + writeChatCompletionChunk(request, deltaStr, false); + buffer += deltaStr; + } + eosDetector->reset(); + } + pos++; + if (eosType == EOS) break; + } + + ChatMessage chatMessage("assistant", buffer); + if (pos == header->seqLen) { + naiveCache.clear(); + } else { + naiveCache.push(NaiveCacheItem(pos, chatMessage)); + } + + if (params.stream) { + writeChatCompletionChunk(request, "", true); + } else { + int nCompletionTokens = pos - promptEndPos; + ChatUsage usage(nPromptTokens, nCompletionTokens, nPromptTokens + nCompletionTokens); + Choice choice(chatMessage); + ChatCompletion completion(choice, usage); + std::string chatJson = ((json)completion).dump(); + request.writeJson(chatJson); + } + printf("๐Ÿ”ถ\n"); + fflush(stdout); + } + +private: + InferenceParams parseRequest(HttpRequest& request) { + InferenceParams params; + params.temperature = args->temperature; + params.top_p = args->topp; + params.seed = args->seed; + params.stream = false; + params.messages = parseChatMessages(request.parsedJson["messages"]); + params.max_tokens = -1; + + if (request.parsedJson.contains("stream")) { + params.stream = request.parsedJson["stream"].get(); + } + if (request.parsedJson.contains("temperature")) { + params.temperature = request.parsedJson["temperature"].template get(); + } + if (request.parsedJson.contains("seed")) { + params.seed = request.parsedJson["seed"].template get(); + sampler->setSeed(params.seed); + } + if (request.parsedJson.contains("max_tokens")) { + params.max_tokens = request.parsedJson["max_tokens"].template get(); + } + if (request.parsedJson.contains("stop")) { + params.stop = request.parsedJson["stop"].template get>(); + } else { + const std::string defaultStop = "<|eot_id|>"; + params.stop = std::vector{defaultStop}; + } + return params; + } +}; + +void handleCompletionsRequest(HttpRequest& request, ApiServer *api) { + api->complete(request); +} + +void handleModelsRequest(HttpRequest& request, const char* modelPath) { + std::string path(modelPath); + size_t pos = path.find_last_of("/\\"); + std::string modelName = (pos == std::string::npos) ? 
+
+void handleCompletionsRequest(HttpRequest& request, ApiServer *api) {
+    api->complete(request);
+}
+
+void handleModelsRequest(HttpRequest& request, const char* modelPath) {
+    std::string path(modelPath);
+    size_t pos = path.find_last_of("/\\");
+    std::string modelName = (pos == std::string::npos)
+        ? path
+        : path.substr(pos + 1);
+
+    Model model(modelName);
+    ModelList list(model);
+    std::string response = ((json)list).dump();
+    request.writeJson(response);
+}
+
+static void server(AppInferenceContext *context) {
+    int serverSocket = createServerSocket(context->args->port);
+
+    TokenizerChatStops stops(context->tokenizer);
+    ChatTemplateGenerator templateGenerator(context->args->chatTemplateType, context->tokenizer->chatTemplate, stops.stops[0]);
+    EosDetector eosDetector(stops.nStops, context->tokenizer->eosTokenIds.data(), stops.stops, stops.maxStopLength, stops.maxStopLength);
+    ApiServer api(context->inference, context->tokenizer, context->sampler, context->args, context->header, &eosDetector, &templateGenerator);
+
+    printf("Server URL: http://127.0.0.1:%d/v1/\n", context->args->port);
+
+    std::vector<Route> routes = {
+        {
+            "/v1/chat/completions",
+            HttpMethod::METHOD_POST,
+            std::bind(&handleCompletionsRequest, std::placeholders::_1, &api)
+        },
+        {
+            "/v1/models",
+            HttpMethod::METHOD_GET,
+            std::bind(&handleModelsRequest, std::placeholders::_1, context->args->modelPath)
+        }
+    };
+
+    while (true) {
+        try {
+            int clientSocket = acceptSocket(serverSocket);
+            HttpRequest request = HttpRequest::read(clientSocket);
+            printf("🔷 %s %s\n", request.getMethod().c_str(), request.path.c_str());
+            Router::resolve(request, routes);
+            destroySocket(clientSocket);
+        } catch (NnReadNetworkException& ex) {
+            printf("Read socket error: %d %s\n", ex.code, ex.message);
+        } catch (NnWriteNetworkException& ex) {
+            printf("Write socket error: %d %s\n", ex.code, ex.message);
+        }
+    }
+
+    destroySocket(serverSocket);
+}
+
+#ifdef _WIN32
+    #define EXECUTABLE_NAME "dllama-api.exe"
+#else
+    #define EXECUTABLE_NAME "dllama-api"
+#endif
+
+void usage() {
+    fprintf(stderr, "Usage: %s {--model <path>} {--tokenizer <path>} [--port <port>]\n", EXECUTABLE_NAME);
+    fprintf(stderr, "        [--buffer-float-type {f32|f16|q40|q80}]\n");
+    fprintf(stderr, "        [--weights-float-type {f32|f16|q40|q80}]\n");
+    fprintf(stderr, "        [--max-seq-len <n>]\n");
+    fprintf(stderr, "        [--nthreads <n>]\n");
+    fprintf(stderr, "        [--workers <ip:port> ...]\n");
+    fprintf(stderr, "        [--temperature <value>]\n");
+    fprintf(stderr, "        [--topp <value>]\n");
+    fprintf(stderr, "        [--seed <value>]\n");
+    fprintf(stderr, "Example:\n");
+    fprintf(stderr, "  sudo nice -n -20 ./dllama-api --port 9990 --nthreads 4 \\\n");
+    fprintf(stderr, "    --model dllama_model_llama3_2_3b_instruct_q40.m \\\n");
+    fprintf(stderr, "    --tokenizer dllama_tokenizer_llama3_2_3b_instruct_q40.t \\\n");
+    fprintf(stderr, "    --buffer-float-type q80 --max-seq-len 8192 \\\n");
+    fprintf(stderr, "    --workers 10.0.0.2:9998 10.0.0.3:9998 10.0.0.4:9998\n");
+    fflush(stderr);
+}
+
+int main(int argc, char *argv[]) {
+#ifdef SIGPIPE
+    std::signal(SIGPIPE, SIG_IGN);
+#endif
+
+    initQuants();
+    initSockets();
+
+    int returnCode = EXIT_SUCCESS;
+    try {
+        AppCliArgs args = AppCliArgs::parse(argc, argv, false);
+        if (args.help) {
+            usage();
+        } else {
+            runInferenceApp(&args, server);
+        }
+    } catch (std::exception &e) {
+        printf("🚨 Critical error: %s\n", e.what());
+        returnCode = EXIT_FAILURE;
+    }
+
+    cleanupSockets();
+    return returnCode;
+}
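A sketch of the NaiveCache contract that complete() relies on (positions and messages are illustrative, not part of the patch): the cache matches only an exact prefix of the incoming conversation, and on a hit it strips the matched messages and returns the position where evaluation can resume.

    NaiveCache cache;
    cache.push(NaiveCacheItem(57, ChatMessage("user", "Hi")));
    cache.push(NaiveCacheItem(57, ChatMessage("assistant", "Hello! How can I help?")));

    std::vector<ChatMessage> messages = {
        ChatMessage("user", "Hi"),
        ChatMessage("assistant", "Hello! How can I help?"),
        ChatMessage("user", "What is 2 + 2?")  // the new delta
    };
    pos_t startPos = 0;
    bool hit = cache.resolveDeltaPrompt(messages, startPos);
    // hit == true, startPos == 57, and messages now holds only the new user
    // message, so only the delta is templated, tokenized and prefilled.
    // Any mismatch in role or content clears the cache and re-evaluates everything.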
diff --git a/src/dllama.cpp b/src/dllama.cpp
new file mode 100644
index 0000000..6fae8f9
--- /dev/null
+++ b/src/dllama.cpp
@@ -0,0 +1,285 @@
+#include "nn/nn-core.hpp"
+#include "nn/nn-config-builder.hpp"
+#include "nn/nn-cpu.hpp"
+#include "nn/nn-cpu-ops.hpp"
+#include "nn/nn-network.hpp"
+#include "nn/nn-executor.hpp"
+#include "llm.hpp"
+#include "tokenizer.hpp"
+#include "app.hpp"
+#include <cmath>
+#include <cstring>
+
+static void inference(AppInferenceContext *context) {
+    if (context->args->prompt == nullptr)
+        throw std::runtime_error("Prompt is required");
+    if (context->args->steps == 0)
+        throw std::runtime_error("Number of steps is required");
+
+    std::vector<int> inputTokensVec(std::strlen(context->args->prompt) + 3);
+    int *inputTokens = inputTokensVec.data();
+
+    NnUint pos = 0;
+    int nInputTokens;
+    context->tokenizer->encode(context->args->prompt, inputTokens, &nInputTokens, true, true);
+
+    if (nInputTokens > context->header->seqLen)
+        throw std::runtime_error("The number of prompt tokens is greater than the sequence length");
+    if (nInputTokens > context->args->steps)
+        throw std::runtime_error("The number of prompt tokens is greater than the number of steps");
+
+    NnSize sentBytes = 0;
+    NnSize recvBytes = 0;
+    NnUint evalTotalTime = 0;
+    NnUint predTotalTime = 0;
+
+    int token = inputTokens[pos];
+    printf("%s\n", context->args->prompt);
+    for (;;) {
+        long remainingTokens = nInputTokens - 1 - (long)pos;
+        if (remainingTokens <= 0)
+            break;
+        NnUint batchSize = remainingTokens < context->args->nBatches
+            ? remainingTokens
+            : context->args->nBatches;
+
+        context->inference->setBatchSize(batchSize);
+        context->inference->setPosition(pos);
+        for (NnUint i = 0; i < batchSize; i++)
+            context->inference->setToken(i, inputTokens[pos + i]);
+
+        context->inference->forward();
+
+        pos += batchSize;
+        token = inputTokens[pos + 1];
+
+        if (context->network != nullptr)
+            context->network->getStats(&sentBytes, &recvBytes);
+
+        NnUint evalTime = context->executor->getTotalTime(STEP_EXECUTE_OP);
+        NnUint syncTime = context->executor->getTotalTime(STEP_SYNC_NODES);
+        printf("🔷 Eval%5u ms Sync%5u ms | Sent%6zu kB Recv%6zu kB | (%d tokens)\n",
+            evalTime / 1000,
+            syncTime / 1000,
+            sentBytes / 1024,
+            recvBytes / 1024,
+            batchSize);
+        evalTotalTime += evalTime + syncTime;
+    }
+
+    fflush(stdout);
+
+    context->inference->setBatchSize(1);
+    context->tokenizer->resetDecoder();
+
+    const NnUint maxPos = std::min(context->header->seqLen, context->args->steps);
+    for (; pos < maxPos; pos++) {
+        context->inference->setPosition(pos);
+        context->inference->setToken(0, token);
+        context->inference->forward();
+
+        token = context->sampler->sample(context->inference->logitsPipe);
+
+        char *piece = context->tokenizer->decode(token);
+
+        if (context->network != nullptr)
+            context->network->getStats(&sentBytes, &recvBytes);
+
+        NnUint predTime = context->executor->getTotalTime(STEP_EXECUTE_OP);
+        NnUint syncTime = context->executor->getTotalTime(STEP_SYNC_NODES);
+        printf("🔶 Pred%5u ms Sync%5u ms | Sent%6zu kB Recv%6zu kB | %s\n",
+            predTime / 1000,
+            syncTime / 1000,
+            sentBytes / 1024,
+            recvBytes / 1024,
+            piece == nullptr ? "~" : piece);
+        fflush(stdout);
+        predTotalTime += predTime + syncTime;
+    }
+
+    NnUint nEvalTokens = nInputTokens - 1;
+    NnUint nPredTokens = pos - nEvalTokens;
+    float evalTotalTimeMs = evalTotalTime / 1000.0;
+    float predTotalTimeMs = predTotalTime / 1000.0;
+    printf("\n");
+    printf("Evaluation\n");
+    printf("   nBatches: %d\n", context->args->nBatches);
+    printf("    nTokens: %d\n", nEvalTokens);
+    printf("   tokens/s: %3.2f (%3.2f ms/tok)\n",
+        (nEvalTokens * 1000) / evalTotalTimeMs,
+        evalTotalTimeMs / ((float) nEvalTokens));
+    printf("Prediction\n");
+    printf("    nTokens: %d\n", nPredTokens);
+    printf("   tokens/s: %3.2f (%3.2f ms/tok)\n",
+        (nPredTokens * 1000) / predTotalTimeMs,
+        predTotalTimeMs / ((float) nPredTokens));
+}
+
+static NnUint readStdin(const char *guide, char *buffer, NnUint size) {
+    std::fflush(stdin);
+    std::printf("%s", guide);
+    if (std::fgets(buffer, size, stdin) != NULL) {
+        NnUint length = std::strlen(buffer);
+        if (length > 0 && buffer[length - 1] == '\n') {
+            buffer[length - 1] = '\0';
+            length--;
+        }
+        return length;
+    }
+    return 0;
+}
+
+static void perplexity(AppInferenceContext *context) {
+    if (context->args->prompt == nullptr)
+        throw std::runtime_error("Prompt is required");
+
+    std::vector<int> inputTokensVec(std::strlen(context->args->prompt) + 3);
+    int *inputTokens = inputTokensVec.data();
+
+    int nInputTokens;
+    context->tokenizer->encode(context->args->prompt, inputTokens, &nInputTokens, true, true);
+
+    printf("Evaluating %d tokens...\n", nInputTokens);
+
+    float totalLogProb = 0.0f;
+    NnUint pos = 0;
+
+    context->inference->setBatchSize(1);
+
+    for (pos = 0; pos < nInputTokens - 1; pos++) {
+        context->inference->setPosition(pos);
+        context->inference->setToken(0, inputTokens[pos]);
+        context->inference->forward();
+
+        float *logits = context->inference->logitsPipe;
+        softmax_F32(logits, context->header->vocabSize);
+
+        int targetToken = inputTokens[pos + 1];
+        float prob = logits[targetToken];
+
+        totalLogProb += std::log(std::max(prob, 1e-30f));
+        printf("%5d / %d, prob=%f\n", pos + 1, nInputTokens - 1, prob);
+    }
+
+    float avgLogProb = totalLogProb / (float)(nInputTokens - 1);
+    float perplexity = expf(-avgLogProb);
+
+    printf("\n");
+    printf("Results\n");
+    printf("   perplexity: %f (lower = better)\n", perplexity);
+    printf("   avgLogProb: %f\n", avgLogProb);
+    printf("  bitPerToken: %f\n", -avgLogProb / std::log(2.0));
+}
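For reference, the quantities printed by perplexity() are the standard ones. With N prompt tokens and p_i the model's probability of token i+1 given tokens 1..i, the loop above computes

    avgLogProb  = (1 / (N - 1)) * sum(log p_i)   for i = 1 .. N-1
    perplexity  = exp(-avgLogProb)
    bitPerToken = -avgLogProb / log(2)

so a perplexity of 1.0 would mean every next token is predicted with certainty, and higher values mean the model is more surprised by the prompt.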
+
+static void chat(AppInferenceContext *context) {
+    const NnUint seqLen = context->header->seqLen;
+    char prompt[2048];
+
+    TokenizerChatStops stops(context->tokenizer);
+    ChatTemplateGenerator templateGenerator(context->args->chatTemplateType, context->tokenizer->chatTemplate, stops.stops[0]);
+    EosDetector eosDetector(stops.nStops, context->tokenizer->eosTokenIds.data(), stops.stops, stops.maxStopLength, stops.maxStopLength);
+
+    const NnUint sysPromptLength = readStdin("💻 System prompt (optional): ", prompt, sizeof(prompt));
+    std::vector<ChatItem> deltaItems;
+    if (sysPromptLength > 0)
+        deltaItems.push_back(ChatItem{"system", prompt});
+
+    NnUint pos = 0;
+    NnUint userPromptLength;
+    int token;
+    int nInputTokens;
+    do {
+        do {
+            userPromptLength = readStdin("\n👱 User\n> ", prompt, sizeof(prompt));
+        } while (userPromptLength == 0);
+
+        deltaItems.push_back(ChatItem{"user", prompt});
+
+        GeneratedChat inputPrompt = templateGenerator.generate(deltaItems.size(), deltaItems.data(), true);
+        std::unique_ptr<int[]> inputTokensPtr(new int[inputPrompt.length + 2]);
+        int *inputTokens = inputTokensPtr.get();
+
+        bool isStart = pos == 0;
+        context->tokenizer->encode((char*)inputPrompt.content, inputTokens, &nInputTokens, isStart, true);
+
+        NnUint userPromptEndPos = (NnUint)std::min(seqLen, pos + nInputTokens - 1);
+        for (NnUint i = 0; ;) {
+            int remainingTokens = userPromptEndPos - pos;
+            if (remainingTokens <= 0)
+                break;
+            NnUint batchSize = remainingTokens < context->args->nBatches
+                ? remainingTokens
+                : context->args->nBatches;
+
+            context->inference->setBatchSize(batchSize);
+            context->inference->setPosition(pos);
+            for (NnUint j = 0; j < batchSize; j++)
+                context->inference->setToken(j, inputTokens[i + j]);
+
+            context->inference->forward();
+
+            i += batchSize;
+            pos += batchSize;
+            token = inputTokens[i + 1];
+        }
+
+        context->inference->setBatchSize(1);
+        context->tokenizer->resetDecoder();
+
+        printf("\n🤖 Assistant\n");
+        if (inputPrompt.publicPrompt != nullptr)
+            printf("%s", inputPrompt.publicPrompt);
+
+        while (pos < seqLen) {
+            context->inference->setPosition(pos);
+            context->inference->setToken(0, token);
+            context->inference->forward();
+
+            token = context->sampler->sample(context->inference->logitsPipe);
+
+            char *piece = context->tokenizer->decode(token);
+            EosDetectorType eosType = eosDetector.append(token, piece);
+            if (eosType == NOT_EOS || eosType == EOS) {
+                char *delta = eosDetector.getDelta();
+                if (delta != nullptr) {
+                    printf("%s", delta);
+                    fflush(stdout);
+                }
+                eosDetector.reset();
+            }
+            pos++;
+            if (eosType == EOS) break;
+        }
+
+        deltaItems.clear();
+    } while (pos < seqLen);
+
+    printf("(end of context)\n");
+}
+
+int main(int argc, char **argv) {
+    initQuants();
+    initSockets();
+
+    int returnCode = EXIT_SUCCESS;
+    try {
+        AppCliArgs args = AppCliArgs::parse(argc, argv, true);
+        if (std::strcmp(args.mode, "inference") == 0) {
+            args.benchmark = true;
+            runInferenceApp(&args, &inference);
+        } else if (std::strcmp(args.mode, "perplexity") == 0)
+            runInferenceApp(&args, &perplexity);
+        else if (std::strcmp(args.mode, "chat") == 0)
+            runInferenceApp(&args, &chat);
+        else if (std::strcmp(args.mode, "worker") == 0)
+            runWorkerApp(&args);
+        else
+            throw std::runtime_error("Unsupported mode");
+    } catch (std::exception &e) {
+        printf("🚨 Critical error: %s\n", e.what());
+        returnCode = EXIT_FAILURE;
+    }
+
+    cleanupSockets();
+    return returnCode;
+}
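Both chat() above and the API server's complete() funnel every decoded piece through EosDetector before printing. A hedged illustration of that contract, inferred from the calls above (the diff names only NOT_EOS and EOS; the intermediate partial-match state is assumed):

    // Stop string: "<|eot_id|>"; the model emits the pieces "Hi", "<|eot", "_id|>".
    eosDetector.append(t1, "Hi");     // NOT_EOS: getDelta() releases "Hi" for printing
    eosDetector.append(t2, "<|eot");  // partial match: neither NOT_EOS nor EOS,
                                      // so the loop prints nothing and keeps buffering
    eosDetector.append(t3, "_id|>");  // EOS: the stop text itself is never released,
                                      // and the generation loop breaks

This buffering is why a stop sequence split across several tokens never leaks into the streamed output.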
diff --git a/src/json.hpp b/src/json.hpp
new file mode 100644
index 0000000..8b72ea6
--- /dev/null
+++ b/src/json.hpp
@@ -0,0 +1,24765 @@
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+/****************************************************************************\
+ * Note on documentation: The source files contain links to the online     *
+ * documentation of the public API at https://json.nlohmann.me. This URL   *
+ * contains the most recent documentation and should also be applicable to *
+ * previous versions; documentation for deprecated functions is not        *
+ * removed, but marked deprecated. See "Generate documentation" section in *
+ * file docs/README.md.                                                    *
+\****************************************************************************/
+
+#ifndef INCLUDE_NLOHMANN_JSON_HPP_
+#define INCLUDE_NLOHMANN_JSON_HPP_
+
+#include <algorithm> // all_of, find, for_each
+#include <cstddef> // nullptr_t, ptrdiff_t, size_t
+#include <functional> // hash, less
+#include <initializer_list> // initializer_list
+#ifndef JSON_NO_IO
+    #include <iosfwd> // istream, ostream
+#endif  // JSON_NO_IO
+#include <iterator> // random_access_iterator_tag
+#include <memory> // unique_ptr
+#include <string> // string, stoi, to_string
+#include <utility> // declval, forward, move, pair, swap
+#include <vector> // vector
+
+// #include <nlohmann/adl_serializer.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+#include <utility>
+
+// #include <nlohmann/detail/abi_macros.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+// This file contains all macro definitions affecting or depending on the ABI
+
+#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK
+    #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH)
+        #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3
+            #warning "Already included a different version of the library!"
+        #endif
+    #endif
+#endif
+
+#define NLOHMANN_JSON_VERSION_MAJOR 3   // NOLINT(modernize-macro-to-enum)
+#define NLOHMANN_JSON_VERSION_MINOR 11  // NOLINT(modernize-macro-to-enum)
+#define NLOHMANN_JSON_VERSION_PATCH 3   // NOLINT(modernize-macro-to-enum)
+
+#ifndef JSON_DIAGNOSTICS
+    #define JSON_DIAGNOSTICS 0
+#endif
+
+#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
+    #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0
+#endif
+
+#if JSON_DIAGNOSTICS
+    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag
+#else
+    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS
+#endif
+
+#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
+    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp
+#else
+    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON
+#endif
+
+#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION
+    #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0
+#endif
+
+// Construct the namespace ABI tags component
+#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b
+#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \
+    NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b)
+
+#define NLOHMANN_JSON_ABI_TAGS \
+    NLOHMANN_JSON_ABI_TAGS_CONCAT( \
+        NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \
+        NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON)
+
+// Construct the namespace version component
+#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \
+    _v ## major ## _ ## minor ## _ ## patch
+#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \
+    NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch)
+
+#if NLOHMANN_JSON_NAMESPACE_NO_VERSION
+#define NLOHMANN_JSON_NAMESPACE_VERSION
+#else
+#define NLOHMANN_JSON_NAMESPACE_VERSION \
+    NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \
+                                           NLOHMANN_JSON_VERSION_MINOR, \
+                                           NLOHMANN_JSON_VERSION_PATCH)
+#endif
+
+// Combine namespace components
+#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b
+#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \
+    NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b)
+
+#ifndef NLOHMANN_JSON_NAMESPACE
+#define NLOHMANN_JSON_NAMESPACE \
+    nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \
+        NLOHMANN_JSON_ABI_TAGS, \
+        NLOHMANN_JSON_NAMESPACE_VERSION)
+#endif
+
+#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN
+#define NLOHMANN_JSON_NAMESPACE_BEGIN                 \
+    namespace nlohmann                                \
+    {                                                 \
+    inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT(  \
+        NLOHMANN_JSON_ABI_TAGS,                       \
+        NLOHMANN_JSON_NAMESPACE_VERSION)              \
+    {
+#endif
+
+#ifndef NLOHMANN_JSON_NAMESPACE_END
+#define NLOHMANN_JSON_NAMESPACE_END                                     \
+    }  /* namespace (inline namespace) NOLINT(readability/namespace) */ \
+    }  // namespace nlohmann
+#endif
+
+// #include <nlohmann/detail/conversions/from_json.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+#include <algorithm> // transform
+#include <array> // array
+#include <forward_list> // forward_list
+#include <iterator> // inserter, front_inserter, end
+#include <map> // map
+#include <string> // string
+#include <tuple> // tuple, make_tuple
+#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
+#include <unordered_map> // unordered_map
+#include <utility> // pair, declval
+#include <valarray> // valarray
+
+// #include <nlohmann/detail/exceptions.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+#include <cstddef> // nullptr_t
+#include <exception> // exception
+#if JSON_DIAGNOSTICS
+    #include <numeric> // accumulate
+#endif
+#include <stdexcept> // runtime_error
+#include <string> // to_string
+#include <vector> // vector
+
+// #include <nlohmann/detail/value_t.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+#include <array> // array
+#include <cstddef> // size_t
+#include <cstdint> // uint8_t
+#include <string> // string
+
+// #include <nlohmann/detail/macro_scope.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+#include <utility> // declval, pair
+// #include <nlohmann/detail/meta/detected.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+#include <type_traits>
+
+// #include <nlohmann/detail/meta/void_t.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-License-Identifier: MIT
+
+
+
+// #include <nlohmann/detail/abi_macros.hpp>
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+namespace detail
+{
+
+template<typename ...Ts> struct make_void
+{
+    using type = void;
+};
+template<typename ...Ts> using void_t = typename make_void<Ts...>::type;
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
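An aside on the block above (not part of the vendored header): void_t maps any well-formed pack of types to void, and that is the hook that makes the detector machinery just below work. A classic use, assuming only what is defined above:

    #include <string>
    #include <type_traits>
    #include <utility>

    // Hypothetical trait: does T have a member function size()?
    template<class T, class = void>
    struct has_size : std::false_type {};

    template<class T>
    struct has_size<T, nlohmann::detail::void_t<decltype(std::declval<T>().size())>>
        : std::true_type {};

    static_assert(has_size<std::string>::value, "std::string has size()");
    static_assert(!has_size<int>::value, "int does not");

The detector / is_detected templates that follow generalize exactly this pattern.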
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+namespace detail
+{
+
+// https://en.cppreference.com/w/cpp/experimental/is_detected
+struct nonesuch
+{
+    nonesuch() = delete;
+    ~nonesuch() = delete;
+    nonesuch(nonesuch const&) = delete;
+    nonesuch(nonesuch const&&) = delete;
+    void operator=(nonesuch const&) = delete;
+    void operator=(nonesuch&&) = delete;
+};
+
+template<class Default,
+         class AlwaysVoid,
+         template<class...> class Op,
+         class... Args>
+struct detector
+{
+    using value_t = std::false_type;
+    using type = Default;
+};
+
+template<class Default, template<class...> class Op, class... Args>
+struct detector<Default, void_t<Op<Args...>>, Op, Args...>
+{
+    using value_t = std::true_type;
+    using type = Op<Args...>;
+};
+
+template<template<class...> class Op, class... Args>
+using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;
+
+template<template<class...> class Op, class... Args>
+struct is_detected_lazy : is_detected<Op, Args...> { };
+
+template<template<class...> class Op, class... Args>
+using detected_t = typename detector<nonesuch, void, Op, Args...>::type;
+
+template<class Default, template<class...> class Op, class... Args>
+using detected_or = detector<Default, void, Op, Args...>;
+
+template<class Default, template<class...> class Op, class... Args>
+using detected_or_t = typename detected_or<Default, Op, Args...>::type;
+
+template<class Expected, template<class...> class Op, class... Args>
+using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;
+
+template<class To, template<class...> class Op, class... Args>
+using is_detected_convertible =
+    std::is_convertible<detected_t<Op, Args...>, To>;
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include <nlohmann/thirdparty/hedley/hedley.hpp>
+
+
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-FileCopyrightText: 2016-2021 Evan Nemerson
+// SPDX-License-Identifier: MIT
+
+/* Hedley - https://nemequ.github.io/hedley
+ * Created by Evan Nemerson <evan@nemerson.com>
+ */
+
+#if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 15)
+#if defined(JSON_HEDLEY_VERSION)
+    #undef JSON_HEDLEY_VERSION
+#endif
+#define JSON_HEDLEY_VERSION 15
+
+#if defined(JSON_HEDLEY_STRINGIFY_EX)
+    #undef JSON_HEDLEY_STRINGIFY_EX
+#endif
+#define JSON_HEDLEY_STRINGIFY_EX(x) #x
+
+#if defined(JSON_HEDLEY_STRINGIFY)
+    #undef JSON_HEDLEY_STRINGIFY
+#endif
+#define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x)
+
+#if defined(JSON_HEDLEY_CONCAT_EX)
+    #undef JSON_HEDLEY_CONCAT_EX
+#endif
+#define JSON_HEDLEY_CONCAT_EX(a,b) a##b
+
+#if defined(JSON_HEDLEY_CONCAT)
+    #undef JSON_HEDLEY_CONCAT
+#endif
+#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b)
+
+#if defined(JSON_HEDLEY_CONCAT3_EX)
+    #undef JSON_HEDLEY_CONCAT3_EX
+#endif
+#define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c
+
+#if defined(JSON_HEDLEY_CONCAT3)
+    #undef JSON_HEDLEY_CONCAT3
+#endif
+#define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c)
+
+#if defined(JSON_HEDLEY_VERSION_ENCODE)
+    #undef JSON_HEDLEY_VERSION_ENCODE
+#endif
+#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision))
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR)
+    #undef JSON_HEDLEY_VERSION_DECODE_MAJOR
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000)
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_MINOR)
+    #undef JSON_HEDLEY_VERSION_DECODE_MINOR
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000)
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_REVISION)
+    #undef JSON_HEDLEY_VERSION_DECODE_REVISION
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000)
+
+#if defined(JSON_HEDLEY_GNUC_VERSION)
+    #undef JSON_HEDLEY_GNUC_VERSION
+#endif
+#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__)
+    #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
+#elif defined(__GNUC__)
+    #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_VERSION_CHECK)
+    #undef JSON_HEDLEY_GNUC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_GNUC_VERSION)
+    #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >=
JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_MSVC_VERSION) + #undef JSON_HEDLEY_MSVC_VERSION +#endif +#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100) +#elif defined(_MSC_FULL_VER) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10) +#elif defined(_MSC_VER) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0) +#endif + +#if defined(JSON_HEDLEY_MSVC_VERSION_CHECK) + #undef JSON_HEDLEY_MSVC_VERSION_CHECK +#endif +#if !defined(JSON_HEDLEY_MSVC_VERSION) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0) +#elif defined(_MSC_VER) && (_MSC_VER >= 1400) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch))) +#elif defined(_MSC_VER) && (_MSC_VER >= 1200) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch))) +#else + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor))) +#endif + +#if defined(JSON_HEDLEY_INTEL_VERSION) + #undef JSON_HEDLEY_INTEL_VERSION +#endif +#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && !defined(__ICL) + #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE) +#elif defined(__INTEL_COMPILER) && !defined(__ICL) + #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0) +#endif + +#if defined(JSON_HEDLEY_INTEL_VERSION_CHECK) + #undef JSON_HEDLEY_INTEL_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_INTEL_VERSION) + #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_INTEL_CL_VERSION) + #undef JSON_HEDLEY_INTEL_CL_VERSION +#endif +#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && defined(__ICL) + #define JSON_HEDLEY_INTEL_CL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER, __INTEL_COMPILER_UPDATE, 0) +#endif + +#if defined(JSON_HEDLEY_INTEL_CL_VERSION_CHECK) + #undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_INTEL_CL_VERSION) + #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_CL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_PGI_VERSION) + #undef JSON_HEDLEY_PGI_VERSION +#endif +#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__) + #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__) +#endif + +#if defined(JSON_HEDLEY_PGI_VERSION_CHECK) + #undef JSON_HEDLEY_PGI_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_PGI_VERSION) + #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define 
JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_SUNPRO_VERSION) + #undef JSON_HEDLEY_SUNPRO_VERSION +#endif +#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10) +#elif defined(__SUNPRO_C) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf) +#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10) +#elif defined(__SUNPRO_CC) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf) +#endif + +#if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK) + #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_SUNPRO_VERSION) + #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) + #undef JSON_HEDLEY_EMSCRIPTEN_VERSION +#endif +#if defined(__EMSCRIPTEN__) + #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__) +#endif + +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK) + #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) + #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_ARM_VERSION) + #undef JSON_HEDLEY_ARM_VERSION +#endif +#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION) + #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100) +#elif defined(__CC_ARM) && defined(__ARMCC_VERSION) + #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100) +#endif + +#if defined(JSON_HEDLEY_ARM_VERSION_CHECK) + #undef JSON_HEDLEY_ARM_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_ARM_VERSION) + #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_IBM_VERSION) + #undef JSON_HEDLEY_IBM_VERSION +#endif +#if defined(__ibmxl__) + #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__) +#elif defined(__xlC__) && defined(__xlC_ver__) + #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff) +#elif defined(__xlC__) + #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0) +#endif + +#if defined(JSON_HEDLEY_IBM_VERSION_CHECK) + #undef JSON_HEDLEY_IBM_VERSION_CHECK +#endif +#if 
defined(JSON_HEDLEY_IBM_VERSION) + #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_VERSION) + #undef JSON_HEDLEY_TI_VERSION +#endif +#if \ + defined(__TI_COMPILER_VERSION__) && \ + ( \ + defined(__TMS470__) || defined(__TI_ARM__) || \ + defined(__MSP430__) || \ + defined(__TMS320C2000__) \ + ) +#if (__TI_COMPILER_VERSION__ >= 16000000) + #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif +#endif + +#if defined(JSON_HEDLEY_TI_VERSION_CHECK) + #undef JSON_HEDLEY_TI_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_VERSION) + #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL2000_VERSION) + #undef JSON_HEDLEY_TI_CL2000_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__) + #define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL2000_VERSION) + #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL430_VERSION) + #undef JSON_HEDLEY_TI_CL430_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__) + #define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL430_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL430_VERSION) + #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) + #undef JSON_HEDLEY_TI_ARMCL_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__)) + #define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK) + #undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) + #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL6X_VERSION) + #undef JSON_HEDLEY_TI_CL6X_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__) + #define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, 
(__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL6X_VERSION) + #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL7X_VERSION) + #undef JSON_HEDLEY_TI_CL7X_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__) + #define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL7X_VERSION) + #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) + #undef JSON_HEDLEY_TI_CLPRU_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__) + #define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) + #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_CRAY_VERSION) + #undef JSON_HEDLEY_CRAY_VERSION +#endif +#if defined(_CRAYC) + #if defined(_RELEASE_PATCHLEVEL) + #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL) + #else + #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0) + #endif +#endif + +#if defined(JSON_HEDLEY_CRAY_VERSION_CHECK) + #undef JSON_HEDLEY_CRAY_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_CRAY_VERSION) + #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_IAR_VERSION) + #undef JSON_HEDLEY_IAR_VERSION +#endif +#if defined(__IAR_SYSTEMS_ICC__) + #if __VER__ > 1000 + #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000)) + #else + #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0) + #endif +#endif + +#if defined(JSON_HEDLEY_IAR_VERSION_CHECK) + #undef JSON_HEDLEY_IAR_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_IAR_VERSION) + #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TINYC_VERSION) + #undef JSON_HEDLEY_TINYC_VERSION +#endif +#if defined(__TINYC__) + #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100) +#endif + +#if 
defined(JSON_HEDLEY_TINYC_VERSION_CHECK) + #undef JSON_HEDLEY_TINYC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TINYC_VERSION) + #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_DMC_VERSION) + #undef JSON_HEDLEY_DMC_VERSION +#endif +#if defined(__DMC__) + #define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf) +#endif + +#if defined(JSON_HEDLEY_DMC_VERSION_CHECK) + #undef JSON_HEDLEY_DMC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_DMC_VERSION) + #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_COMPCERT_VERSION) + #undef JSON_HEDLEY_COMPCERT_VERSION +#endif +#if defined(__COMPCERT_VERSION__) + #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100) +#endif + +#if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK) + #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_COMPCERT_VERSION) + #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_PELLES_VERSION) + #undef JSON_HEDLEY_PELLES_VERSION +#endif +#if defined(__POCC__) + #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0) +#endif + +#if defined(JSON_HEDLEY_PELLES_VERSION_CHECK) + #undef JSON_HEDLEY_PELLES_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_PELLES_VERSION) + #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_MCST_LCC_VERSION) + #undef JSON_HEDLEY_MCST_LCC_VERSION +#endif +#if defined(__LCC__) && defined(__LCC_MINOR__) + #define JSON_HEDLEY_MCST_LCC_VERSION JSON_HEDLEY_VERSION_ENCODE(__LCC__ / 100, __LCC__ % 100, __LCC_MINOR__) +#endif + +#if defined(JSON_HEDLEY_MCST_LCC_VERSION_CHECK) + #undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_MCST_LCC_VERSION) + #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_MCST_LCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_GCC_VERSION) + #undef JSON_HEDLEY_GCC_VERSION +#endif +#if \ + defined(JSON_HEDLEY_GNUC_VERSION) && \ + !defined(__clang__) && \ + !defined(JSON_HEDLEY_INTEL_VERSION) && \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_ARM_VERSION) && \ + !defined(JSON_HEDLEY_CRAY_VERSION) && \ + !defined(JSON_HEDLEY_TI_VERSION) && \ + !defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL430_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL2000_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL6X_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL7X_VERSION) && \ + !defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \ + !defined(__COMPCERT__) && \ + !defined(JSON_HEDLEY_MCST_LCC_VERSION) + #define JSON_HEDLEY_GCC_VERSION 
JSON_HEDLEY_GNUC_VERSION +#endif + +#if defined(JSON_HEDLEY_GCC_VERSION_CHECK) + #undef JSON_HEDLEY_GCC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_GCC_VERSION) + #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_ATTRIBUTE +#endif +#if \ + defined(__has_attribute) && \ + ( \ + (!defined(JSON_HEDLEY_IAR_VERSION) || JSON_HEDLEY_IAR_VERSION_CHECK(8,5,9)) \ + ) +# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute) +#else +# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE +#endif +#if defined(__has_attribute) + #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) +#else + #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE +#endif +#if defined(__has_attribute) + #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) +#else + #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE +#endif +#if \ + defined(__has_cpp_attribute) && \ + defined(__cplusplus) && \ + (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute) +#else + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS) + #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS +#endif +#if !defined(__cplusplus) || !defined(__has_cpp_attribute) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) +#elif \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_IAR_VERSION) && \ + (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \ + (!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0)) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute) +#else + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE +#endif +#if defined(__has_cpp_attribute) && defined(__cplusplus) + #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) +#else + #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE +#endif +#if defined(__has_cpp_attribute) && defined(__cplusplus) + #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) +#else + #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_BUILTIN) + #undef JSON_HEDLEY_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin) +#else + #define JSON_HEDLEY_HAS_BUILTIN(builtin) 
(0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN) + #undef JSON_HEDLEY_GNUC_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) +#else + #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_BUILTIN) + #undef JSON_HEDLEY_GCC_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) +#else + #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_FEATURE) + #undef JSON_HEDLEY_HAS_FEATURE +#endif +#if defined(__has_feature) + #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature) +#else + #define JSON_HEDLEY_HAS_FEATURE(feature) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_FEATURE) + #undef JSON_HEDLEY_GNUC_HAS_FEATURE +#endif +#if defined(__has_feature) + #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) +#else + #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_FEATURE) + #undef JSON_HEDLEY_GCC_HAS_FEATURE +#endif +#if defined(__has_feature) + #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) +#else + #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_EXTENSION) + #undef JSON_HEDLEY_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension) +#else + #define JSON_HEDLEY_HAS_EXTENSION(extension) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION) + #undef JSON_HEDLEY_GNUC_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) +#else + #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_EXTENSION) + #undef JSON_HEDLEY_GCC_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) +#else + #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE +#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute) +#else + #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE +#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) +#else + #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE +#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) +#else + 
#define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_WARNING) + #undef JSON_HEDLEY_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning) +#else + #define JSON_HEDLEY_HAS_WARNING(warning) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_WARNING) + #undef JSON_HEDLEY_GNUC_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) +#else + #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_WARNING) + #undef JSON_HEDLEY_GCC_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) +#else + #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ + defined(__clang__) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \ + (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR)) + #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_PRAGMA(value) __pragma(value) +#else + #define JSON_HEDLEY_PRAGMA(value) +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH) + #undef JSON_HEDLEY_DIAGNOSTIC_PUSH +#endif +#if defined(JSON_HEDLEY_DIAGNOSTIC_POP) + #undef JSON_HEDLEY_DIAGNOSTIC_POP +#endif +#if defined(__clang__) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push)) + #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop)) +#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push") + #define 
JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop")
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0)
+    #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
+    #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
+#else
+    #define JSON_HEDLEY_DIAGNOSTIC_PUSH
+    #define JSON_HEDLEY_DIAGNOSTIC_POP
+#endif
+
+/* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for
+   HEDLEY INTERNAL USE ONLY.  API subject to change without notice. */
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
+    #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_
+#endif
+#if defined(__cplusplus)
+#  if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat")
+#    if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions")
+#      if JSON_HEDLEY_HAS_WARNING("-Wc++1z-extensions")
+#        define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
+    JSON_HEDLEY_DIAGNOSTIC_PUSH \
+    _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
+    _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
+    _Pragma("clang diagnostic ignored \"-Wc++1z-extensions\"") \
+    xpr \
+    JSON_HEDLEY_DIAGNOSTIC_POP
+#      else
+#        define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
+    JSON_HEDLEY_DIAGNOSTIC_PUSH \
+    _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
+    _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
+    xpr \
+    JSON_HEDLEY_DIAGNOSTIC_POP
+#      endif
+#    else
+#      define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
+    JSON_HEDLEY_DIAGNOSTIC_PUSH \
+    _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
+    xpr \
+    JSON_HEDLEY_DIAGNOSTIC_POP
+#    endif
+#  endif
+#endif
+#if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
+    #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x
+#endif
+
+#if defined(JSON_HEDLEY_CONST_CAST)
+    #undef JSON_HEDLEY_CONST_CAST
+#endif
+#if defined(__cplusplus)
+#  define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr))
+#elif \
+    JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \
+    JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \
+    JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+#  define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \
+        JSON_HEDLEY_DIAGNOSTIC_PUSH \
+        JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
+        ((T) (expr)); \
+        JSON_HEDLEY_DIAGNOSTIC_POP \
+    }))
+#else
+#  define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr))
+#endif
+
+#if defined(JSON_HEDLEY_REINTERPRET_CAST)
+    #undef JSON_HEDLEY_REINTERPRET_CAST
+#endif
+#if defined(__cplusplus)
+    #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr))
+#else
+    #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr))
+#endif
+
+#if defined(JSON_HEDLEY_STATIC_CAST)
+    #undef JSON_HEDLEY_STATIC_CAST
+#endif
+#if defined(__cplusplus)
+    #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr))
+#else
+    #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr))
+#endif
+
+#if defined(JSON_HEDLEY_CPP_CAST)
+    #undef JSON_HEDLEY_CPP_CAST
+#endif
+#if defined(__cplusplus)
+#  if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast")
+#    define JSON_HEDLEY_CPP_CAST(T, expr) \
+    JSON_HEDLEY_DIAGNOSTIC_PUSH \
+    _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \
+    ((T) (expr)) \
+    JSON_HEDLEY_DIAGNOSTIC_POP
+#  elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0)
+#    define JSON_HEDLEY_CPP_CAST(T, expr) \
+    JSON_HEDLEY_DIAGNOSTIC_PUSH \
+    _Pragma("diag_suppress=Pe137") \
+    JSON_HEDLEY_DIAGNOSTIC_POP
+#  else
+#    define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr))
+#  endif
+#else
+#  define JSON_HEDLEY_CPP_CAST(T, expr) (expr)
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED)
+    #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:1478 1786)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1216,1444,1445") +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996)) +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215") +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:161)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") +#elif 
JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068)) +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161") +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 161") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:1292)) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097,1098") +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097") +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wcast-qual") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION) + #undef 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunused-function") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("clang diagnostic ignored \"-Wunused-function\"") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("GCC diagnostic ignored \"-Wunused-function\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(1,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION __pragma(warning(disable:4505)) +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("diag_suppress 3142") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION +#endif + +#if defined(JSON_HEDLEY_DEPRECATED) + #undef JSON_HEDLEY_DEPRECATED +#endif +#if defined(JSON_HEDLEY_DEPRECATED_FOR) + #undef JSON_HEDLEY_DEPRECATED_FOR +#endif +#if \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since)) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement)) +#elif \ + (JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since))) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement))) +#elif defined(__cplusplus) && (__cplusplus >= 201402L) + #define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]]) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]]) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__)) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__)) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + 
JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated) +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated") + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated") +#else + #define JSON_HEDLEY_DEPRECATED(since) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) +#endif + +#if defined(JSON_HEDLEY_UNAVAILABLE) + #undef JSON_HEDLEY_UNAVAILABLE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since))) +#else + #define JSON_HEDLEY_UNAVAILABLE(available_since) +#endif + +#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT) + #undef JSON_HEDLEY_WARN_UNUSED_RESULT +#endif +#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT_MSG) + #undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__)) +#elif (JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L) + #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]]) +#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) + #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) +#elif defined(_Check_return_) /* SAL */ + #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_ + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_ +#else + #define JSON_HEDLEY_WARN_UNUSED_RESULT + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) +#endif + +#if defined(JSON_HEDLEY_SENTINEL) + #undef JSON_HEDLEY_SENTINEL +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_SENTINEL(position) 
__attribute__((__sentinel__(position))) +#else + #define JSON_HEDLEY_SENTINEL(position) +#endif + +#if defined(JSON_HEDLEY_NO_RETURN) + #undef JSON_HEDLEY_NO_RETURN +#endif +#if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_NO_RETURN __noreturn +#elif \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) +#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L + #define JSON_HEDLEY_NO_RETURN _Noreturn +#elif defined(__cplusplus) && (__cplusplus >= 201103L) + #define JSON_HEDLEY_NO_RETURN JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]]) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define JSON_HEDLEY_NO_RETURN _Pragma("does_not_return") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) + #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;") +#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) + #define JSON_HEDLEY_NO_RETURN __attribute((noreturn)) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) + #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) +#else + #define JSON_HEDLEY_NO_RETURN +#endif + +#if defined(JSON_HEDLEY_NO_ESCAPE) + #undef JSON_HEDLEY_NO_ESCAPE +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(noescape) + #define JSON_HEDLEY_NO_ESCAPE __attribute__((__noescape__)) +#else + #define JSON_HEDLEY_NO_ESCAPE +#endif + +#if defined(JSON_HEDLEY_UNREACHABLE) + #undef JSON_HEDLEY_UNREACHABLE +#endif +#if defined(JSON_HEDLEY_UNREACHABLE_RETURN) + #undef JSON_HEDLEY_UNREACHABLE_RETURN +#endif +#if defined(JSON_HEDLEY_ASSUME) + #undef JSON_HEDLEY_ASSUME +#endif +#if \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_ASSUME(expr) __assume(expr) +#elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume) + #define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr) +#elif \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) + #if defined(__cplusplus) + #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr) + #else + #define JSON_HEDLEY_ASSUME(expr) _nassert(expr) + #endif +#endif +#if \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ + 
JSON_HEDLEY_PGI_VERSION_CHECK(18,10,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable() +#elif defined(JSON_HEDLEY_ASSUME) + #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) +#endif +#if !defined(JSON_HEDLEY_ASSUME) + #if defined(JSON_HEDLEY_UNREACHABLE) + #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, ((expr) ? 1 : (JSON_HEDLEY_UNREACHABLE(), 1))) + #else + #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, expr) + #endif +#endif +#if defined(JSON_HEDLEY_UNREACHABLE) + #if \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (JSON_HEDLEY_STATIC_CAST(void, JSON_HEDLEY_ASSUME(0)), (value)) + #else + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE() + #endif +#else + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (value) +#endif +#if !defined(JSON_HEDLEY_UNREACHABLE) + #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) +#endif + +JSON_HEDLEY_DIAGNOSTIC_PUSH +#if JSON_HEDLEY_HAS_WARNING("-Wpedantic") + #pragma clang diagnostic ignored "-Wpedantic" +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus) + #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" +#endif +#if JSON_HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0) + #if defined(__clang__) + #pragma clang diagnostic ignored "-Wvariadic-macros" + #elif defined(JSON_HEDLEY_GCC_VERSION) + #pragma GCC diagnostic ignored "-Wvariadic-macros" + #endif +#endif +#if defined(JSON_HEDLEY_NON_NULL) + #undef JSON_HEDLEY_NON_NULL +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) + #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__))) +#else + #define JSON_HEDLEY_NON_NULL(...) 
+#endif +JSON_HEDLEY_DIAGNOSTIC_POP + +#if defined(JSON_HEDLEY_PRINTF_FORMAT) + #undef JSON_HEDLEY_PRINTF_FORMAT +#endif +#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check))) +#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check))) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(format) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check))) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check)) +#else + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) +#endif + +#if defined(JSON_HEDLEY_CONSTEXPR) + #undef JSON_HEDLEY_CONSTEXPR +#endif +#if defined(__cplusplus) + #if __cplusplus >= 201103L + #define JSON_HEDLEY_CONSTEXPR JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr) + #endif +#endif +#if !defined(JSON_HEDLEY_CONSTEXPR) + #define JSON_HEDLEY_CONSTEXPR +#endif + +#if defined(JSON_HEDLEY_PREDICT) + #undef JSON_HEDLEY_PREDICT +#endif +#if defined(JSON_HEDLEY_LIKELY) + #undef JSON_HEDLEY_LIKELY +#endif +#if defined(JSON_HEDLEY_UNLIKELY) + #undef JSON_HEDLEY_UNLIKELY +#endif +#if defined(JSON_HEDLEY_UNPREDICTABLE) + #undef JSON_HEDLEY_UNPREDICTABLE +#endif +#if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable) + #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr)) +#endif +#if \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(JSON_HEDLEY_PGI_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability)) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability)) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability)) +# define JSON_HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 ) +# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 ) +#elif \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ + 
JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PREDICT(expr, expected, probability) \ + (((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (JSON_HEDLEY_STATIC_CAST(void, expected), (expr))) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \ + (__extension__ ({ \ + double hedley_probability_ = (probability); \ + ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \ + })) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \ + (__extension__ ({ \ + double hedley_probability_ = (probability); \ + ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \ + })) +# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) +# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) +#else +# define JSON_HEDLEY_PREDICT(expr, expected, probability) (JSON_HEDLEY_STATIC_CAST(void, expected), (expr)) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr)) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr)) +# define JSON_HEDLEY_LIKELY(expr) (!!(expr)) +# define JSON_HEDLEY_UNLIKELY(expr) (!!(expr)) +#endif +#if !defined(JSON_HEDLEY_UNPREDICTABLE) + #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5) +#endif + +#if defined(JSON_HEDLEY_MALLOC) + #undef JSON_HEDLEY_MALLOC +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_MALLOC __attribute__((__malloc__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define JSON_HEDLEY_MALLOC _Pragma("returns_new_memory") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_MALLOC __declspec(restrict) +#else + #define JSON_HEDLEY_MALLOC +#endif + +#if defined(JSON_HEDLEY_PURE) + #undef JSON_HEDLEY_PURE +#endif +#if \ 
+ JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PURE __attribute__((__pure__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) +# define JSON_HEDLEY_PURE _Pragma("does_not_write_global_data") +#elif defined(__cplusplus) && \ + ( \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \ + ) +# define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;") +#else +# define JSON_HEDLEY_PURE +#endif + +#if defined(JSON_HEDLEY_CONST) + #undef JSON_HEDLEY_CONST +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(const) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_CONST __attribute__((__const__)) +#elif \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define JSON_HEDLEY_CONST _Pragma("no_side_effect") +#else + #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE +#endif + +#if defined(JSON_HEDLEY_RESTRICT) + #undef JSON_HEDLEY_RESTRICT +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus) + #define JSON_HEDLEY_RESTRICT restrict +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) 
|| \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ + defined(__clang__) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_RESTRICT __restrict +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus) + #define JSON_HEDLEY_RESTRICT _Restrict +#else + #define JSON_HEDLEY_RESTRICT +#endif + +#if defined(JSON_HEDLEY_INLINE) + #undef JSON_HEDLEY_INLINE +#endif +#if \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ + (defined(__cplusplus) && (__cplusplus >= 199711L)) + #define JSON_HEDLEY_INLINE inline +#elif \ + defined(JSON_HEDLEY_GCC_VERSION) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0) + #define JSON_HEDLEY_INLINE __inline__ +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_INLINE __inline +#else + #define JSON_HEDLEY_INLINE +#endif + +#if defined(JSON_HEDLEY_ALWAYS_INLINE) + #undef JSON_HEDLEY_ALWAYS_INLINE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) +# define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) +# define JSON_HEDLEY_ALWAYS_INLINE __forceinline +#elif defined(__cplusplus) && \ + ( \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \ + ) +# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) +# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced") +#else +# define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE +#endif + +#if defined(JSON_HEDLEY_NEVER_INLINE) + #undef JSON_HEDLEY_NEVER_INLINE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + 
JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__)) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline") +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never") +#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) + #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline)) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) + #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) +#else + #define JSON_HEDLEY_NEVER_INLINE +#endif + +#if defined(JSON_HEDLEY_PRIVATE) + #undef JSON_HEDLEY_PRIVATE +#endif +#if defined(JSON_HEDLEY_PUBLIC) + #undef JSON_HEDLEY_PUBLIC +#endif +#if defined(JSON_HEDLEY_IMPORT) + #undef JSON_HEDLEY_IMPORT +#endif +#if defined(_WIN32) || defined(__CYGWIN__) +# define JSON_HEDLEY_PRIVATE +# define JSON_HEDLEY_PUBLIC __declspec(dllexport) +# define JSON_HEDLEY_IMPORT __declspec(dllimport) +#else +# if \ + JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ + ( \ + defined(__TI_EABI__) && \ + ( \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \ + ) \ + ) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden"))) +# define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default"))) +# else +# define JSON_HEDLEY_PRIVATE +# define JSON_HEDLEY_PUBLIC +# endif +# define JSON_HEDLEY_IMPORT extern +#endif + +#if defined(JSON_HEDLEY_NO_THROW) + #undef JSON_HEDLEY_NO_THROW +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__)) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) + #define JSON_HEDLEY_NO_THROW __declspec(nothrow) +#else + #define JSON_HEDLEY_NO_THROW +#endif + +#if 
defined(JSON_HEDLEY_FALL_THROUGH)
+ #undef JSON_HEDLEY_FALL_THROUGH
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(fallthrough) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0) || \
+ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
+ #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__))
+#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough)
+ #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]])
+#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough)
+ #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]])
+#elif defined(__fallthrough) /* SAL */
+ #define JSON_HEDLEY_FALL_THROUGH __fallthrough
+#else
+ #define JSON_HEDLEY_FALL_THROUGH
+#endif
+
+#if defined(JSON_HEDLEY_RETURNS_NON_NULL)
+ #undef JSON_HEDLEY_RETURNS_NON_NULL
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \
+ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
+ #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__))
+#elif defined(_Ret_notnull_) /* SAL */
+ #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_
+#else
+ #define JSON_HEDLEY_RETURNS_NON_NULL
+#endif
+
+#if defined(JSON_HEDLEY_ARRAY_PARAM)
+ #undef JSON_HEDLEY_ARRAY_PARAM
+#endif
+#if \
+ defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
+ !defined(__STDC_NO_VLA__) && \
+ !defined(__cplusplus) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_TINYC_VERSION)
+ #define JSON_HEDLEY_ARRAY_PARAM(name) (name)
+#else
+ #define JSON_HEDLEY_ARRAY_PARAM(name)
+#endif
+
+#if defined(JSON_HEDLEY_IS_CONSTANT)
+ #undef JSON_HEDLEY_IS_CONSTANT
+#endif
+#if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR)
+ #undef JSON_HEDLEY_REQUIRE_CONSTEXPR
+#endif
+/* JSON_HEDLEY_IS_CONSTEXPR_ is for
+ HEDLEY INTERNAL USE ONLY. API subject to change without notice. */
+#if defined(JSON_HEDLEY_IS_CONSTEXPR_)
+ #undef JSON_HEDLEY_IS_CONSTEXPR_
+#endif
+#if \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
+ JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr)
+#endif
+#if !defined(__cplusplus)
+# if \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24)
+#if defined(__INTPTR_TYPE__)
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*)
+#else
+ #include <stdint.h>
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*)
+#endif
+# elif \
+ ( \
+ defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
+ !defined(JSON_HEDLEY_SUNPRO_VERSION) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_IAR_VERSION)) || \
+ (JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) && !defined(JSON_HEDLEY_IAR_VERSION)) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0)
+#if defined(__INTPTR_TYPE__)
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
+#else
+ #include <stdint.h>
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
+#endif
+# elif \
+ defined(JSON_HEDLEY_GCC_VERSION) || \
+ defined(JSON_HEDLEY_INTEL_VERSION) || \
+ defined(JSON_HEDLEY_TINYC_VERSION) || \
+ defined(JSON_HEDLEY_TI_ARMCL_VERSION) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \
+ defined(JSON_HEDLEY_TI_CL2000_VERSION) || \
+ defined(JSON_HEDLEY_TI_CL6X_VERSION) || \
+ defined(JSON_HEDLEY_TI_CL7X_VERSION) || \
+ defined(JSON_HEDLEY_TI_CLPRU_VERSION) || \
+ defined(__clang__)
+# define JSON_HEDLEY_IS_CONSTEXPR_(expr) ( \
+ sizeof(void) != \
+ sizeof(*( \
+ 1 ? \
+ ((void*) ((expr) * 0L) ) : \
+((struct { char v[sizeof(void) * 2]; } *) 1) \
+ ) \
+ ) \
+ )
+# endif
+#endif
+#if defined(JSON_HEDLEY_IS_CONSTEXPR_)
+ #if !defined(JSON_HEDLEY_IS_CONSTANT)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY_IS_CONSTEXPR_(expr)
+ #endif
+ #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY_IS_CONSTEXPR_(expr) ? (expr) : (-1))
+#else
+ #if !defined(JSON_HEDLEY_IS_CONSTANT)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) (0)
+ #endif
+ #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr)
+#endif
+
+#if defined(JSON_HEDLEY_BEGIN_C_DECLS)
+ #undef JSON_HEDLEY_BEGIN_C_DECLS
+#endif
+#if defined(JSON_HEDLEY_END_C_DECLS)
+ #undef JSON_HEDLEY_END_C_DECLS
+#endif
+#if defined(JSON_HEDLEY_C_DECL)
+ #undef JSON_HEDLEY_C_DECL
+#endif
+#if defined(__cplusplus)
+ #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" {
+ #define JSON_HEDLEY_END_C_DECLS }
+ #define JSON_HEDLEY_C_DECL extern "C"
+#else
+ #define JSON_HEDLEY_BEGIN_C_DECLS
+ #define JSON_HEDLEY_END_C_DECLS
+ #define JSON_HEDLEY_C_DECL
+#endif
+
+#if defined(JSON_HEDLEY_STATIC_ASSERT)
+ #undef JSON_HEDLEY_STATIC_ASSERT
+#endif
+#if \
+ !defined(__cplusplus) && ( \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \
+ (JSON_HEDLEY_HAS_FEATURE(c_static_assert) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ defined(_Static_assert) \
+ )
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message)
+#elif \
+ (defined(__cplusplus) && (__cplusplus >= 201103L)) || \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0) || \
+ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message))
+#else
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message)
+#endif
+
+#if defined(JSON_HEDLEY_NULL)
+ #undef JSON_HEDLEY_NULL
+#endif
+#if defined(__cplusplus)
+ #if __cplusplus >= 201103L
+ #define JSON_HEDLEY_NULL JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr)
+ #elif defined(NULL)
+ #define JSON_HEDLEY_NULL NULL
+ #else
+ #define JSON_HEDLEY_NULL
JSON_HEDLEY_STATIC_CAST(void*, 0) + #endif +#elif defined(NULL) + #define JSON_HEDLEY_NULL NULL +#else + #define JSON_HEDLEY_NULL ((void*) 0) +#endif + +#if defined(JSON_HEDLEY_MESSAGE) + #undef JSON_HEDLEY_MESSAGE +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") +# define JSON_HEDLEY_MESSAGE(msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ + JSON_HEDLEY_PRAGMA(message msg) \ + JSON_HEDLEY_DIAGNOSTIC_POP +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg) +#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg) +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#else +# define JSON_HEDLEY_MESSAGE(msg) +#endif + +#if defined(JSON_HEDLEY_WARNING) + #undef JSON_HEDLEY_WARNING +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") +# define JSON_HEDLEY_WARNING(msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ + JSON_HEDLEY_PRAGMA(clang warning msg) \ + JSON_HEDLEY_DIAGNOSTIC_POP +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#else +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg) +#endif + +#if defined(JSON_HEDLEY_REQUIRE) + #undef JSON_HEDLEY_REQUIRE +#endif +#if defined(JSON_HEDLEY_REQUIRE_MSG) + #undef JSON_HEDLEY_REQUIRE_MSG +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if) +# if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat") +# define JSON_HEDLEY_REQUIRE(expr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ + __attribute__((diagnose_if(!(expr), #expr, "error"))) \ + JSON_HEDLEY_DIAGNOSTIC_POP +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ + __attribute__((diagnose_if(!(expr), msg, "error"))) \ + JSON_HEDLEY_DIAGNOSTIC_POP +# else +# define JSON_HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error"))) +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error"))) +# endif +#else +# define JSON_HEDLEY_REQUIRE(expr) +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) +#endif + +#if defined(JSON_HEDLEY_FLAGS) + #undef JSON_HEDLEY_FLAGS +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum) && (!defined(__cplusplus) || JSON_HEDLEY_HAS_WARNING("-Wbitfield-enum-conversion")) + #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__)) +#else + #define JSON_HEDLEY_FLAGS +#endif + +#if defined(JSON_HEDLEY_FLAGS_CAST) + #undef JSON_HEDLEY_FLAGS_CAST +#endif +#if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0) +# define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("warning(disable:188)") \ + ((T) (expr)); \ + JSON_HEDLEY_DIAGNOSTIC_POP \ + })) +#else +# define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr) +#endif + +#if defined(JSON_HEDLEY_EMPTY_BASES) + #undef JSON_HEDLEY_EMPTY_BASES +#endif +#if \ + 
(JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !JSON_HEDLEY_MSVC_VERSION_CHECK(20,0,0)) || \
+ JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
+ #define JSON_HEDLEY_EMPTY_BASES __declspec(empty_bases)
+#else
+ #define JSON_HEDLEY_EMPTY_BASES
+#endif
+
+/* Remaining macros are deprecated. */
+
+#if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK)
+ #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK
+#endif
+#if defined(__clang__)
+ #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0)
+#else
+ #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN)
+ #undef JSON_HEDLEY_CLANG_HAS_BUILTIN
+#endif
+#define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_FEATURE)
+ #undef JSON_HEDLEY_CLANG_HAS_FEATURE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION)
+ #undef JSON_HEDLEY_CLANG_HAS_EXTENSION
+#endif
+#define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE)
+ #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_WARNING)
+ #undef JSON_HEDLEY_CLANG_HAS_WARNING
+#endif
+#define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning)
+
+#endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */
+
+
+// This file contains all internal macro definitions (except those affecting ABI)
+// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them
+
+// #include
+
+
+// exclude unsupported compilers
+#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK)
+ #if defined(__clang__)
+ #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400
+ #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers"
+ #endif
+ #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER))
+ #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800
+ #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers"
+ #endif
+ #endif
+#endif
+
+// C++ language standard detection
+// if the user manually specified the used c++ version this is skipped
+#if !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11)
+ #if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+ #define JSON_HAS_CPP_20
+ #define JSON_HAS_CPP_17
+ #define JSON_HAS_CPP_14
+ #elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464
+ #define JSON_HAS_CPP_17
+ #define JSON_HAS_CPP_14
+ #elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1)
+ #define JSON_HAS_CPP_14
+ #endif
+ // the cpp 11 flag is
always specified because it is the minimal required version
+ #define JSON_HAS_CPP_11
+#endif
+
+#ifdef __has_include
+ #if __has_include(<version>)
+ #include <version>
+ #endif
+#endif
+
+#if !defined(JSON_HAS_FILESYSTEM) && !defined(JSON_HAS_EXPERIMENTAL_FILESYSTEM)
+ #ifdef JSON_HAS_CPP_17
+ #if defined(__cpp_lib_filesystem)
+ #define JSON_HAS_FILESYSTEM 1
+ #elif defined(__cpp_lib_experimental_filesystem)
+ #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1
+ #elif !defined(__has_include)
+ #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1
+ #elif __has_include(<filesystem>)
+ #define JSON_HAS_FILESYSTEM 1
+ #elif __has_include(<experimental/filesystem>)
+ #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1
+ #endif
+
+ // std::filesystem does not work on MinGW GCC 8: https://sourceforge.net/p/mingw-w64/bugs/737/
+ #if defined(__MINGW32__) && defined(__GNUC__) && __GNUC__ == 8
+ #undef JSON_HAS_FILESYSTEM
+ #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
+ #endif
+
+ // no filesystem support before GCC 8: https://en.cppreference.com/w/cpp/compiler_support
+ #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 8
+ #undef JSON_HAS_FILESYSTEM
+ #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
+ #endif
+
+ // no filesystem support before Clang 7: https://en.cppreference.com/w/cpp/compiler_support
+ #if defined(__clang_major__) && __clang_major__ < 7
+ #undef JSON_HAS_FILESYSTEM
+ #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
+ #endif
+
+ // no filesystem support before MSVC 19.14: https://en.cppreference.com/w/cpp/compiler_support
+ #if defined(_MSC_VER) && _MSC_VER < 1914
+ #undef JSON_HAS_FILESYSTEM
+ #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
+ #endif
+
+ // no filesystem support before iOS 13
+ #if defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 130000
+ #undef JSON_HAS_FILESYSTEM
+ #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
+ #endif
+
+ // no filesystem support before macOS Catalina
+ #if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 101500
+ #undef JSON_HAS_FILESYSTEM
+ #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM
+ #endif
+ #endif
+#endif
+
+#ifndef JSON_HAS_EXPERIMENTAL_FILESYSTEM
+ #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 0
+#endif
+
+#ifndef JSON_HAS_FILESYSTEM
+ #define JSON_HAS_FILESYSTEM 0
+#endif
+
+#ifndef JSON_HAS_THREE_WAY_COMPARISON
+ #if defined(__cpp_impl_three_way_comparison) && __cpp_impl_three_way_comparison >= 201907L \
+ && defined(__cpp_lib_three_way_comparison) && __cpp_lib_three_way_comparison >= 201907L
+ #define JSON_HAS_THREE_WAY_COMPARISON 1
+ #else
+ #define JSON_HAS_THREE_WAY_COMPARISON 0
+ #endif
+#endif
+
+#ifndef JSON_HAS_RANGES
+ // ranges header shipping in GCC 11.1.0 (released 2021-04-27) has syntax error
+ #if defined(__GLIBCXX__) && __GLIBCXX__ == 20210427
+ #define JSON_HAS_RANGES 0
+ #elif defined(__cpp_lib_ranges)
+ #define JSON_HAS_RANGES 1
+ #else
+ #define JSON_HAS_RANGES 0
+ #endif
+#endif
+
+#ifndef JSON_HAS_STATIC_RTTI
+ #if !defined(_HAS_STATIC_RTTI) || _HAS_STATIC_RTTI != 0
+ #define JSON_HAS_STATIC_RTTI 1
+ #else
+ #define JSON_HAS_STATIC_RTTI 0
+ #endif
+#endif
+
+#ifdef JSON_HAS_CPP_17
+ #define JSON_INLINE_VARIABLE inline
+#else
+ #define JSON_INLINE_VARIABLE
+#endif
+
+#if JSON_HEDLEY_HAS_ATTRIBUTE(no_unique_address)
+ #define JSON_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+ #define JSON_NO_UNIQUE_ADDRESS
+#endif
+
+// disable documentation warnings on clang
+#if defined(__clang__)
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wdocumentation"
+ #pragma clang diagnostic ignored "-Wdocumentation-unknown-command"
+#endif
+
+// allow disabling exceptions
+#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION)
+ #define JSON_THROW(exception) throw exception
+ #define JSON_TRY try
+ #define JSON_CATCH(exception) catch(exception)
+ #define JSON_INTERNAL_CATCH(exception) catch(exception)
+#else
+ #include <cstdlib>
+ #define JSON_THROW(exception) std::abort()
+ #define JSON_TRY if(true)
+ #define JSON_CATCH(exception) if(false)
+ #define JSON_INTERNAL_CATCH(exception) if(false)
+#endif
+
+// override exception macros
+#if defined(JSON_THROW_USER)
+ #undef JSON_THROW
+ #define JSON_THROW JSON_THROW_USER
+#endif
+#if defined(JSON_TRY_USER)
+ #undef JSON_TRY
+ #define JSON_TRY JSON_TRY_USER
+#endif
+#if defined(JSON_CATCH_USER)
+ #undef JSON_CATCH
+ #define JSON_CATCH JSON_CATCH_USER
+ #undef JSON_INTERNAL_CATCH
+ #define JSON_INTERNAL_CATCH JSON_CATCH_USER
+#endif
+#if defined(JSON_INTERNAL_CATCH_USER)
+ #undef JSON_INTERNAL_CATCH
+ #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER
+#endif
+
+// allow overriding assert
+#if !defined(JSON_ASSERT)
+ #include <cassert> // assert
+ #define JSON_ASSERT(x) assert(x)
+#endif
+
+// allow access to some private functions (needed by the test suite)
+#if defined(JSON_TESTS_PRIVATE)
+ #define JSON_PRIVATE_UNLESS_TESTED public
+#else
+ #define JSON_PRIVATE_UNLESS_TESTED private
+#endif
+
+/*!
+@brief macro to briefly define a mapping between an enum and JSON
+@def NLOHMANN_JSON_SERIALIZE_ENUM
+@since version 3.4.0
+*/
+#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \
+ template<typename BasicJsonType> \
+ inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \
+ { \
+ static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
+ static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
+ auto it = std::find_if(std::begin(m), std::end(m), \
+ [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
+ { \
+ return ej_pair.first == e; \
+ }); \
+ j = ((it != std::end(m)) ? it : std::begin(m))->second; \
+ } \
+ template<typename BasicJsonType> \
+ inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \
+ { \
+ static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
+ static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
+ auto it = std::find_if(std::begin(m), std::end(m), \
+ [&j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
+ { \
+ return ej_pair.second == j; \
+ }); \
+ e = ((it != std::end(m)) ? it : std::begin(m))->first; \
+ }
+
+// Ugly macros to avoid uglier copy-paste when specializing basic_json. They
+// may be removed in the future once the class is split.
+
+#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \
+ template<template<typename, typename, typename...> class ObjectType, \
+ template<typename, typename...> class ArrayType, \
+ class StringType, class BooleanType, class NumberIntegerType, \
+ class NumberUnsignedType, class NumberFloatType, \
+ template<typename> class AllocatorType, \
+ template<typename, typename = void> class JSONSerializer, \
+ class BinaryType, \
+ class CustomBaseClass>
+
+#define NLOHMANN_BASIC_JSON_TPL \
+ basic_json<ObjectType, ArrayType, StringType, BooleanType, \
+ NumberIntegerType, NumberUnsignedType, NumberFloatType, \
+ AllocatorType, JSONSerializer, BinaryType, CustomBaseClass>
+
+// Macros to simplify conversion from/to types
+
+#define NLOHMANN_JSON_EXPAND( x ) x
+#define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME
+#define NLOHMANN_JSON_PASTE(...)
NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \ + NLOHMANN_JSON_PASTE64, \ + NLOHMANN_JSON_PASTE63, \ + NLOHMANN_JSON_PASTE62, \ + NLOHMANN_JSON_PASTE61, \ + NLOHMANN_JSON_PASTE60, \ + NLOHMANN_JSON_PASTE59, \ + NLOHMANN_JSON_PASTE58, \ + NLOHMANN_JSON_PASTE57, \ + NLOHMANN_JSON_PASTE56, \ + NLOHMANN_JSON_PASTE55, \ + NLOHMANN_JSON_PASTE54, \ + NLOHMANN_JSON_PASTE53, \ + NLOHMANN_JSON_PASTE52, \ + NLOHMANN_JSON_PASTE51, \ + NLOHMANN_JSON_PASTE50, \ + NLOHMANN_JSON_PASTE49, \ + NLOHMANN_JSON_PASTE48, \ + NLOHMANN_JSON_PASTE47, \ + NLOHMANN_JSON_PASTE46, \ + NLOHMANN_JSON_PASTE45, \ + NLOHMANN_JSON_PASTE44, \ + NLOHMANN_JSON_PASTE43, \ + NLOHMANN_JSON_PASTE42, \ + NLOHMANN_JSON_PASTE41, \ + NLOHMANN_JSON_PASTE40, \ + NLOHMANN_JSON_PASTE39, \ + NLOHMANN_JSON_PASTE38, \ + NLOHMANN_JSON_PASTE37, \ + NLOHMANN_JSON_PASTE36, \ + NLOHMANN_JSON_PASTE35, \ + NLOHMANN_JSON_PASTE34, \ + NLOHMANN_JSON_PASTE33, \ + NLOHMANN_JSON_PASTE32, \ + NLOHMANN_JSON_PASTE31, \ + NLOHMANN_JSON_PASTE30, \ + NLOHMANN_JSON_PASTE29, \ + NLOHMANN_JSON_PASTE28, \ + NLOHMANN_JSON_PASTE27, \ + NLOHMANN_JSON_PASTE26, \ + NLOHMANN_JSON_PASTE25, \ + NLOHMANN_JSON_PASTE24, \ + NLOHMANN_JSON_PASTE23, \ + NLOHMANN_JSON_PASTE22, \ + NLOHMANN_JSON_PASTE21, \ + NLOHMANN_JSON_PASTE20, \ + NLOHMANN_JSON_PASTE19, \ + NLOHMANN_JSON_PASTE18, \ + NLOHMANN_JSON_PASTE17, \ + NLOHMANN_JSON_PASTE16, \ + NLOHMANN_JSON_PASTE15, \ + NLOHMANN_JSON_PASTE14, \ + NLOHMANN_JSON_PASTE13, \ + NLOHMANN_JSON_PASTE12, \ + NLOHMANN_JSON_PASTE11, \ + NLOHMANN_JSON_PASTE10, \ + NLOHMANN_JSON_PASTE9, \ + NLOHMANN_JSON_PASTE8, \ + NLOHMANN_JSON_PASTE7, \ + NLOHMANN_JSON_PASTE6, \ + NLOHMANN_JSON_PASTE5, \ + NLOHMANN_JSON_PASTE4, \ + NLOHMANN_JSON_PASTE3, \ + NLOHMANN_JSON_PASTE2, \ + NLOHMANN_JSON_PASTE1)(__VA_ARGS__)) +#define NLOHMANN_JSON_PASTE2(func, v1) func(v1) +#define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2) +#define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3) +#define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4) +#define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5) +#define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6) +#define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7) +#define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8) +#define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9) +#define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10) +#define NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) +#define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) +#define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, 
v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) +#define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) +#define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) +#define NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) +#define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) +#define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) +#define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) +#define NLOHMANN_JSON_PASTE21(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) +#define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) +#define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) +#define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) +#define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) +#define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) +#define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) 
NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) +#define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) +#define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) +#define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) +#define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) +#define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) +#define NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) +#define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) +#define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) +#define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, 
v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) +#define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) +#define NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) +#define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) +#define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) +#define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) +#define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) +#define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) +#define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) 
NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) +#define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) +#define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) +#define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) +#define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) +#define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) +#define NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) +#define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, 
v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) +#define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) +#define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) +#define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) +#define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) +#define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) +#define 
NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) +#define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) +#define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) +#define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) +#define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) +#define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, 
v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) +#define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) +#define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) + +#define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; +#define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); +#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1); + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE +@since version 3.9.0 +*/ +#define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \ + friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +#define NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Type, ...) \ + friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +#define NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ + friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE +@since version 3.9.0 +*/ +#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) 
\
+    inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+    inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) }
+
+#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \
+    inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) }
+
+#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, ...) \
+    inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+    inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) }
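+
+// Illustrative note (not part of the upstream header): a minimal usage sketch
+// for the convenience macros above; `Person` is a made-up example type, shown
+// commented out so it has no effect on this file.
+//
+//     struct Person {
+//         std::string name;
+//         int age;
+//         NLOHMANN_DEFINE_TYPE_INTRUSIVE(Person, name, age)
+//     };
+//     // nlohmann::json j = Person{"Ada", 36};  // -> {"age":36,"name":"Ada"}
+//     // Person p = j.get<Person>();            // round-trips via from_json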
+
+// inspired from https://stackoverflow.com/a/26745591
+// allows to call any std function as if (e.g. with begin):
+// using std::begin; begin(x);
+//
+// it allows using the detected idiom to retrieve the return type
+// of such an expression
+#define NLOHMANN_CAN_CALL_STD_FUNC_IMPL(std_name) \
+    namespace detail { \
+    using std::std_name; \
+    \
+    template<typename... T> \
+    using result_of_##std_name = decltype(std_name(std::declval<T>()...)); \
+    } \
+    \
+    namespace detail2 { \
+    struct std_name##_tag \
+    { \
+    }; \
+    \
+    template<typename... T> \
+    std_name##_tag std_name(T&&...); \
+    \
+    template<typename... T> \
+    using result_of_##std_name = decltype(std_name(std::declval<T>()...)); \
+    \
+    template<typename... T> \
+    struct would_call_std_##std_name \
+    { \
+        static constexpr auto const value = ::nlohmann::detail:: \
+                is_detected_exact<detail2::std_name##_tag, result_of_##std_name, T...>::value; \
+    }; \
+    } /* namespace detail2 */ \
+    \
+    template<typename... T> \
+    struct would_call_std_##std_name : detail2::would_call_std_##std_name<T...> \
+    { \
+    }
+
+#ifndef JSON_USE_IMPLICIT_CONVERSIONS
+    #define JSON_USE_IMPLICIT_CONVERSIONS 1
+#endif
+
+#if JSON_USE_IMPLICIT_CONVERSIONS
+    #define JSON_EXPLICIT
+#else
+    #define JSON_EXPLICIT explicit
+#endif
+
+#ifndef JSON_DISABLE_ENUM_SERIALIZATION
+    #define JSON_DISABLE_ENUM_SERIALIZATION 0
+#endif
+
+#ifndef JSON_USE_GLOBAL_UDLS
+    #define JSON_USE_GLOBAL_UDLS 1
+#endif
+
+#if JSON_HAS_THREE_WAY_COMPARISON
+    #include <compare> // partial_ordering
+#endif
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+namespace detail
+{
+
+///////////////////////////
+// JSON type enumeration //
+///////////////////////////
+
+/*!
+@brief the JSON type enumeration
+
+This enumeration collects the different JSON types. It is internally used to
+distinguish the stored values, and the functions @ref basic_json::is_null(),
+@ref basic_json::is_object(), @ref basic_json::is_array(),
+@ref basic_json::is_string(), @ref basic_json::is_boolean(),
+@ref basic_json::is_number() (with @ref basic_json::is_number_integer(),
+@ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()),
+@ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and
+@ref basic_json::is_structured() rely on it.
+
+@note There are three enumeration entries (number_integer, number_unsigned, and
+number_float), because the library distinguishes these three types for numbers:
+@ref basic_json::number_unsigned_t is used for unsigned integers,
+@ref basic_json::number_integer_t is used for signed integers, and
+@ref basic_json::number_float_t is used for floating-point numbers or to
+approximate integers which do not fit in the limits of their respective type.
+
+@sa see @ref basic_json::basic_json(const value_t value_type) -- create a JSON
+value with the default value for a given type
+
+@since version 1.0.0
+*/
+enum class value_t : std::uint8_t
+{
+    null,             ///< null value
+    object,           ///< object (unordered set of name/value pairs)
+    array,            ///< array (ordered collection of values)
+    string,           ///< string value
+    boolean,          ///< boolean value
+    number_integer,   ///< number value (signed integer)
+    number_unsigned,  ///< number value (unsigned integer)
+    number_float,     ///< number value (floating-point)
+    binary,           ///< binary array (ordered collection of bytes)
+    discarded         ///< discarded by the parser callback function
+};
+
+/*!
+@brief comparison operator for JSON types
+
+Returns an ordering that is similar to Python:
+- order: null < boolean < number < object < array < string < binary
+- furthermore, each type is not smaller than itself
+- discarded values are not comparable
+- binary is represented as a b"" string in python and directly comparable to a
+  string; however, making a binary array directly comparable with a string would
+  be surprising behavior in a JSON file.
+
+@since version 1.0.0
+*/
+#if JSON_HAS_THREE_WAY_COMPARISON
+    inline std::partial_ordering operator<=>(const value_t lhs, const value_t rhs) noexcept // *NOPAD*
+#else
+    inline bool operator<(const value_t lhs, const value_t rhs) noexcept
+#endif
+{
+    static constexpr std::array<std::uint8_t, 9> order = {{
+            0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */,
+            1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */,
+            6 /* binary */
+        }
+    };
+
+    const auto l_index = static_cast<std::size_t>(lhs);
+    const auto r_index = static_cast<std::size_t>(rhs);
+#if JSON_HAS_THREE_WAY_COMPARISON
+    if (l_index < order.size() && r_index < order.size())
+    {
+        return order[l_index] <=> order[r_index]; // *NOPAD*
+    }
+    return std::partial_ordering::unordered;
+#else
+    return l_index < order.size() && r_index < order.size() && order[l_index] < order[r_index];
+#endif
+}
+
+// GCC selects the built-in operator< over an operator rewritten from
+// a user-defined spaceship operator
+// Clang, MSVC, and ICC select the rewritten candidate
+// (see GCC bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105200)
+#if JSON_HAS_THREE_WAY_COMPARISON && defined(__GNUC__)
+inline bool operator<(const value_t lhs, const value_t rhs) noexcept
+{
+    return std::is_lt(lhs <=> rhs); // *NOPAD*
+}
+#endif
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
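+
+// Illustrative note (not part of the upstream header): with the rank table
+// above, value_t::null < value_t::string holds (ranks 0 vs 5), all three
+// number kinds share rank 2, and value_t::discarded lies outside the table,
+// so it never compares less than anything.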
+
+// #include <nlohmann/detail/string_escape.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
+// SPDX-License-Identifier: MIT
+
+
+
+// #include <nlohmann/detail/abi_macros.hpp>
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+namespace detail
+{
+
+/*!
+@brief replace all occurrences of a substring by another string
+
+@param[in,out] s  the string to manipulate; changed so that all
+                  occurrences of @a f are replaced with @a t
+@param[in]     f  the substring to replace with @a t
+@param[in]     t  the string to replace @a f
+
+@pre The search string @a f must not be empty. **This precondition is
+enforced with an assertion.**
+
+@since version 2.0.0
+*/
+template<typename StringType>
+inline void replace_substring(StringType& s, const StringType& f,
+                              const StringType& t)
+{
+    JSON_ASSERT(!f.empty());
+    for (auto pos = s.find(f);                // find first occurrence of f
+            pos != StringType::npos;          // make sure f was found
+            s.replace(pos, f.size(), t),      // replace with t, and
+            pos = s.find(f, pos + t.size()))  // find next occurrence of f
+    {}
+}
+
+/*!
+ * @brief string escaping as described in RFC 6901 (Sect. 4)
+ * @param[in] s string to escape
+ * @return    escaped string
+ *
+ * Note the order of escaping "~" to "~0" and "/" to "~1" is important.
+ */
+template<typename StringType>
+inline StringType escape(StringType s)
+{
+    replace_substring(s, StringType{"~"}, StringType{"~0"});
+    replace_substring(s, StringType{"/"}, StringType{"~1"});
+    return s;
+}
+
+/*!
+ * @brief string unescaping as described in RFC 6901 (Sect. 4)
+ * @param[in] s string to unescape
+ * @return    unescaped string
+ *
+ * Note the order of escaping "~1" to "/" and "~0" to "~" is important.
+ */
+template<typename StringType>
+static void unescape(StringType& s)
+{
+    replace_substring(s, StringType{"~1"}, StringType{"/"});
+    replace_substring(s, StringType{"~0"}, StringType{"~"});
+}
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
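+
+// Illustrative note (not part of the upstream header): escaping the JSON
+// Pointer token "a/b~c" yields "a~1b~0c", and unescape() reverses it; doing
+// the "~" replacement after the "/" one would corrupt tokens containing "~1".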
+
+// #include <nlohmann/detail/input/position_t.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
+// SPDX-License-Identifier: MIT
+
+
+
+#include <cstddef> // size_t
+
+// #include <nlohmann/detail/abi_macros.hpp>
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+namespace detail
+{
+
+/// struct to capture the start position of the current token
+struct position_t
+{
+    /// the total number of characters read
+    std::size_t chars_read_total = 0;
+    /// the number of characters read in the current line
+    std::size_t chars_read_current_line = 0;
+    /// the number of lines read
+    std::size_t lines_read = 0;
+
+    /// conversion to size_t to preserve SAX interface
+    constexpr operator size_t() const
+    {
+        return chars_read_total;
+    }
+};
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
+// SPDX-FileCopyrightText: 2018 The Abseil Authors
+// SPDX-License-Identifier: MIT
+
+
+
+#include <array> // array
+#include <cstddef> // size_t
+#include <type_traits> // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type
+#include <utility> // index_sequence, make_index_sequence, index_sequence_for
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+namespace detail
+{
+
+template<typename T>
+using uncvref_t = typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+
+#ifdef JSON_HAS_CPP_14
+
+// the following utilities are natively available in C++14
+using std::enable_if_t;
+using std::index_sequence;
+using std::make_index_sequence;
+using std::index_sequence_for;
+
+#else
+
+// alias templates to reduce boilerplate
+template<bool B, typename T = void>
+using enable_if_t = typename std::enable_if<B, T>::type;
+
+// The following code is taken from https://github.com/abseil/abseil-cpp/blob/10cb35e459f5ecca5b2ff107635da0bfa41011b4/absl/utility/utility.h
+// which is part of Google Abseil (https://github.com/abseil/abseil-cpp), licensed under the Apache License 2.0.
+
+//// START OF CODE FROM GOOGLE ABSEIL
+
+// integer_sequence
+//
+// Class template representing a compile-time integer sequence. An instantiation
+// of `integer_sequence<T, Ints...>` has a sequence of integers encoded in its
+// type through its template arguments (which is a common need when
+// working with C++11 variadic templates). `absl::integer_sequence` is designed
+// to be a drop-in replacement for C++14's `std::integer_sequence`.
+//
+// Example:
+//
+//   template< class T, T... Ints >
+//   void user_function(integer_sequence<T, Ints...>);
+//
+//   int main()
+//   {
+//     // user_function's `T` will be deduced to `int` and `Ints...`
+//     // will be deduced to `0, 1, 2, 3, 4`.
+//     user_function(make_integer_sequence<int, 5>());
+//   }
+template <typename T, T... Ints>
+struct integer_sequence
+{
+    using value_type = T;
+    static constexpr std::size_t size() noexcept
+    {
+        return sizeof...(Ints);
+    }
+};
+
+// index_sequence
+//
+// A helper template for an `integer_sequence` of `size_t`,
+// `absl::index_sequence` is designed to be a drop-in replacement for C++14's
+// `std::index_sequence`.
+template <size_t... Ints>
+using index_sequence = integer_sequence<size_t, Ints...>;
+
+namespace utility_internal
+{
+
+template <typename Seq, size_t SeqSize, size_t Rem>
+struct Extend;
+
+// Note that SeqSize == sizeof...(Ints). It's passed explicitly for efficiency.
+template <typename T, T... Ints, size_t SeqSize>
+struct Extend<integer_sequence<T, Ints...>, SeqSize, 0>
+{
+    using type = integer_sequence < T, Ints..., (Ints + SeqSize)... >;
+};
+
+template <typename T, T... Ints, size_t SeqSize>
+struct Extend<integer_sequence<T, Ints...>, SeqSize, 1>
+{
+    using type = integer_sequence < T, Ints..., (Ints + SeqSize)..., 2 * SeqSize >;
+};
+
+// Recursion helper for 'make_integer_sequence<T, N>'.
+// 'Gen<T, N>::type' is an alias for 'integer_sequence<T, 0, 1, ... N-1>'.
+template <typename T, size_t N>
+struct Gen
+{
+    using type =
+        typename Extend < typename Gen < T, N / 2 >::type, N / 2, N % 2 >::type;
+};
+
+template <typename T>
+struct Gen<T, 0>
+{
+    using type = integer_sequence<T>;
+};
+
+} // namespace utility_internal
+
+// Compile-time sequences of integers
+
+// make_integer_sequence
+//
+// This template alias is equivalent to
+// `integer_sequence<int, 0, 1, ..., N-1>`, and is designed to be a drop-in
+// replacement for C++14's `std::make_integer_sequence`.
+template <typename T, T N>
+using make_integer_sequence = typename utility_internal::Gen<T, N>::type;
+
+// make_index_sequence
+//
+// This template alias is equivalent to `index_sequence<0, 1, ..., N-1>`,
+// and is designed to be a drop-in replacement for C++14's
+// `std::make_index_sequence`.
+template <size_t N>
+using make_index_sequence = make_integer_sequence<size_t, N>;
+
+// index_sequence_for
+//
+// Converts a typename pack into an index sequence of the same length, and
+// is designed to be a drop-in replacement for C++14's
+// `std::index_sequence_for()`
+template <typename... Ts>
+using index_sequence_for = make_index_sequence<sizeof...(Ts)>;
+
+//// END OF CODE FROM GOOGLE ABSEIL
+
+#endif
+
+// dispatch utility (taken from ranges-v3)
+template<unsigned N> struct priority_tag : priority_tag < N - 1 > {};
+template<> struct priority_tag<0> {};
+
+// taken from ranges-v3
+template<typename T>
+struct static_const
+{
+    static JSON_INLINE_VARIABLE constexpr T value{};
+};
+
+#ifndef JSON_HAS_CPP_17
+    template<typename T>
+    constexpr T static_const<T>::value;
+#endif
+
+template<typename T, typename... Args>
+inline constexpr std::array<T, sizeof...(Args)> make_array(Args&& ... args)
+{
+    return std::array<T, sizeof...(Args)> {{static_cast<T>(std::forward<Args>(args))...}};
+}
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
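+
+// Illustrative note (not part of the upstream header): make_array deduces the
+// element count from the argument pack, e.g.
+//     constexpr auto a = make_array<int>(1, 2, 3);  // std::array<int, 3>
+// and priority_tag<N> derives from priority_tag<N - 1>, so overload
+// resolution prefers the candidate taking the highest-N tag.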
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
+// SPDX-License-Identifier: MIT
+
+
+
+#include <limits> // numeric_limits
+#include <type_traits> // false_type, is_constructible, is_integral, is_same, true_type
+#include <utility> // declval
+#include <tuple> // tuple
+#include <string> // char_traits
+
+// #include <nlohmann/detail/iterators/iterator_traits.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
+// SPDX-License-Identifier: MIT
+
+
+
+#include <iterator> // random_access_iterator_tag
+
+// #include <nlohmann/detail/abi_macros.hpp>
+
+// #include <nlohmann/detail/meta/void_t.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+namespace detail
+{
+
+template<typename It, typename = void>
+struct iterator_types {};
+
+template<typename It>
+struct iterator_types <
+    It,
+    void_t<typename It::difference_type, typename It::value_type, typename It::pointer,
+    typename It::reference, typename It::iterator_category >>
+{
+    using difference_type = typename It::difference_type;
+    using value_type = typename It::value_type;
+    using pointer = typename It::pointer;
+    using reference = typename It::reference;
+    using iterator_category = typename It::iterator_category;
+};
+
+// This is required as some compilers implement std::iterator_traits in a way that
+// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341.
+template<typename T, typename = void>
+struct iterator_traits
+{
+};
+
+template<typename T>
+struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
+    : iterator_types<T>
+{
+};
+
+template<typename T>
+struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
+{
+    using iterator_category = std::random_access_iterator_tag;
+    using value_type = T;
+    using difference_type = ptrdiff_t;
+    using pointer = T*;
+    using reference = T&;
+};
+
+} // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/call_std/begin.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
+// SPDX-License-Identifier: MIT
+
+
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+
+NLOHMANN_CAN_CALL_STD_FUNC_IMPL(begin);
+
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include <nlohmann/detail/meta/call_std/end.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
+// SPDX-License-Identifier: MIT
+
+
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+
+NLOHMANN_CAN_CALL_STD_FUNC_IMPL(end);
+
+NLOHMANN_JSON_NAMESPACE_END
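+
+// Illustrative note (not part of the upstream header): the two macro
+// expansions above generate nlohmann::would_call_std_begin<T> and
+// would_call_std_end<T>; e.g. would_call_std_begin<std::vector<int>&>::value
+// is true, while would_call_std_begin<int>::value is false.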
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/detected.hpp>
+
+// #include <nlohmann/json_fwd.hpp>
+//     __ _____ _____ _____
+//  __|  |   __|     |   | |  JSON for Modern C++
+// |  |  |__   |  |  | | | |  version 3.11.3
+// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
+//
+// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann <https://nlohmann.me>
+// SPDX-License-Identifier: MIT
+
+#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_
+    #define INCLUDE_NLOHMANN_JSON_FWD_HPP_
+
+    #include <cstdint> // int64_t, uint64_t
+    #include <map> // map
+    #include <memory> // allocator
+    #include <string> // string
+    #include <vector> // vector
+
+    // #include <nlohmann/detail/abi_macros.hpp>
+
+
+    /*!
+    @brief namespace for Niels Lohmann
+    @see https://github.com/nlohmann
+    @since version 1.0.0
+    */
+    NLOHMANN_JSON_NAMESPACE_BEGIN
+
+    /*!
+    @brief default JSONSerializer template argument
+
+    This serializer ignores the template arguments and uses ADL
+    ([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl))
+    for serialization.
+    */
+    template<typename T = void, typename SFINAE = void>
+    struct adl_serializer;
+
+    /// a class to store JSON values
+    /// @sa https://json.nlohmann.me/api/basic_json/
+    template<template<typename U, typename V, typename... Args> class ObjectType =
+    std::map,
+    template<typename U, typename... Args> class ArrayType = std::vector,
+    class StringType = std::string, class BooleanType = bool,
+    class NumberIntegerType = std::int64_t,
+    class NumberUnsignedType = std::uint64_t,
+    class NumberFloatType = double,
+    template<typename U> class AllocatorType = std::allocator,
+    template<typename T, typename SFINAE = void> class JSONSerializer =
+    adl_serializer,
+    class BinaryType = std::vector<std::uint8_t>, // cppcheck-suppress syntaxError
+    class CustomBaseClass = void>
+    class basic_json;
+
+    /// @brief JSON Pointer defines a string syntax for identifying a specific value within a JSON document
+    /// @sa https://json.nlohmann.me/api/json_pointer/
+    template<typename RefStringType>
+    class json_pointer;
+
+    /*!
+    @brief default specialization
+    @sa https://json.nlohmann.me/api/json/
+    */
+    using json = basic_json<>;
+
+    /// @brief a minimal map-like container that preserves insertion order
+    /// @sa https://json.nlohmann.me/api/ordered_map/
+    template<class Key, class T, class IgnoredLess = std::less<Key>,
+             class Allocator = std::allocator<std::pair<const Key, T>>>
+    struct ordered_map;
+
+    /// @brief specialization that maintains the insertion order of object keys
+    /// @sa https://json.nlohmann.me/api/ordered_json/
+    using ordered_json = basic_json<nlohmann::ordered_map>;
+
+    NLOHMANN_JSON_NAMESPACE_END
+
+#endif  // INCLUDE_NLOHMANN_JSON_FWD_HPP_
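+
+// Illustrative note (not part of the upstream header): the practical
+// difference between the two specializations declared above --
+//     nlohmann::json j;          j["b"] = 1; j["a"] = 2;   // dumps {"a":2,"b":1}
+//     nlohmann::ordered_json oj; oj["b"] = 1; oj["a"] = 2; // dumps {"b":1,"a":2}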
+
+
+NLOHMANN_JSON_NAMESPACE_BEGIN
+/*!
+@brief detail namespace with internal helper functions
+
+This namespace collects functions that should not be exposed,
+implementations of some @ref basic_json methods, and meta-programming helpers.
+
+@since version 2.1.0
+*/
+namespace detail
+{
+
+/////////////
+// helpers //
+/////////////
+
+// Note to maintainers:
+//
+// Every trait in this file expects a non CV-qualified type.
+// The only exceptions are in the 'aliases for detected' section
+// (i.e. those of the form: decltype(T::member_function(std::declval<T>())))
+//
+// In this case, T has to be properly CV-qualified to constraint the function arguments
+// (e.g. to_json(BasicJsonType&, const T&))
+
+template<typename> struct is_basic_json : std::false_type {};
+
+NLOHMANN_BASIC_JSON_TPL_DECLARATION
+struct is_basic_json<NLOHMANN_BASIC_JSON_TPL> : std::true_type {};
+
+// used by exceptions create() member functions
+// true_type for pointer to possibly cv-qualified basic_json or std::nullptr_t
+// false_type otherwise
+template<typename BasicJsonContext>
+struct is_basic_json_context :
+    std::integral_constant < bool,
+    is_basic_json<typename std::remove_cv<typename std::remove_pointer<BasicJsonContext>::type>::type>::value
+    || std::is_same<BasicJsonContext, std::nullptr_t>::value >
+{};
+
+//////////////////////
+// json_ref helpers //
+//////////////////////
+
+template<typename>
+class json_ref;
+
+template<typename>
+struct is_json_ref : std::false_type {};
+
+template<typename T>
+struct is_json_ref<json_ref<T>> : std::true_type {};
+
+//////////////////////////
+// aliases for detected //
+//////////////////////////
+
+template<typename T>
+using mapped_type_t = typename T::mapped_type;
+
+template<typename T>
+using key_type_t = typename T::key_type;
+
+template<typename T>
+using value_type_t = typename T::value_type;
+
+template<typename T>
+using difference_type_t = typename T::difference_type;
+
+template<typename T>
+using pointer_t = typename T::pointer;
+
+template<typename T>
+using reference_t = typename T::reference;
+
+template<typename T>
+using iterator_category_t = typename T::iterator_category;
+
+template<typename T, typename... Args>
+using to_json_function = decltype(T::to_json(std::declval<Args>()...));
+
+template<typename T, typename... Args>
+using from_json_function = decltype(T::from_json(std::declval<Args>()...));
+
+template<typename T, typename U>
+using get_template_function = decltype(std::declval<T>().template get<U>());
+
+// trait checking if JSONSerializer<T>::from_json(json const&, udt&) exists
+template<typename BasicJsonType, typename T, typename = void>
+struct has_from_json : std::false_type {};
+
+// trait checking if j.get<T> is valid
+// use this trait instead of std::is_constructible or std::is_convertible,
+// both rely on, or make use of implicit conversions, and thus fail when T
+// has several constructors/operator= (see https://github.com/nlohmann/json/issues/958)
+template<typename BasicJsonType, typename T>
+struct is_getable
+{
+    static constexpr bool value = is_detected<get_template_function, const BasicJsonType&, T>::value;
+};
+
+template<typename BasicJsonType, typename T>
+struct has_from_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
+{
+    using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+    static constexpr bool value =
+        is_detected_exact<void, from_json_function, serializer,
+        const BasicJsonType&, T&>::value;
+};
+
+// This trait checks if JSONSerializer<T>::from_json(json const&) exists
+// this overload is used for non-default-constructible user-defined-types
+template<typename BasicJsonType, typename T, typename = void>
+struct has_non_default_from_json : std::false_type {};
+
+template<typename BasicJsonType, typename T>
+struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
+{
+    using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+    static constexpr bool value =
+        is_detected_exact<T, from_json_function, serializer,
+        const BasicJsonType&>::value;
+};
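+
+// Illustrative note (not part of the upstream header): for the default
+// serializer, has_from_json<nlohmann::json, T>::value becomes true once a
+// free ADL from_json(const nlohmann::json&, T&) exists for a hypothetical
+// user type T -- that is the switch that enables j.get<T>().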
+
+// This trait checks if BasicJsonType::json_serializer<T>::to_json exists
+// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion.
+template<typename BasicJsonType, typename T, typename = void>
+struct has_to_json : std::false_type {};
+
+template<typename BasicJsonType, typename T>
+struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
+{
+    using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+    static constexpr bool value =
+        is_detected_exact<void, to_json_function, serializer, BasicJsonType&,
+        T>::value;
+};
+
+template<typename T>
+using detect_key_compare = typename T::key_compare;
+
+template<typename T>
+struct has_key_compare : std::integral_constant<bool, is_detected<detect_key_compare, T>::value> {};
+
+// obtains the actual object key comparator
+template<typename BasicJsonType>
+struct actual_object_comparator
+{
+    using object_t = typename BasicJsonType::object_t;
+    using object_comparator_t = typename BasicJsonType::default_object_comparator_t;
+    using type = typename std::conditional < has_key_compare<object_t>::value,
+          typename object_t::key_compare, object_comparator_t>::type;
+};
+
+template<typename BasicJsonType>
+using actual_object_comparator_t = typename actual_object_comparator<BasicJsonType>::type;
+
+/////////////////
+// char_traits //
+/////////////////
+
+// Primary template of char_traits calls std char_traits
+template<typename T>
+struct char_traits : std::char_traits<T>
+{};
+
+// Explicitly define char traits for unsigned char since it is not standard
+template<>
+struct char_traits<unsigned char> : std::char_traits<char>
+{
+    using char_type = unsigned char;
+    using int_type = uint64_t;
+
+    // Redefine to_int_type function
+    static int_type to_int_type(char_type c) noexcept
+    {
+        return static_cast<int_type>(c);
+    }
+
+    static char_type to_char_type(int_type i) noexcept
+    {
+        return static_cast<char_type>(i);
+    }
+
+    static constexpr int_type eof() noexcept
+    {
+        return static_cast<int_type>(EOF);
+    }
+};
+
+// Explicitly define char traits for signed char since it is not standard
+template<>
+struct char_traits<signed char> : std::char_traits<char>
+{
+    using char_type = signed char;
+    using int_type = uint64_t;
+
+    // Redefine to_int_type function
+    static int_type to_int_type(char_type c) noexcept
+    {
+        return static_cast<int_type>(c);
+    }
+
+    static char_type to_char_type(int_type i) noexcept
+    {
+        return static_cast<char_type>(i);
+    }
+
+    static constexpr int_type eof() noexcept
+    {
+        return static_cast<int_type>(EOF);
+    }
+};
+
+///////////////////
+// is_ functions //
+///////////////////
+
+// https://en.cppreference.com/w/cpp/types/conjunction
+template<class...> struct conjunction : std::true_type { };
+template<class B> struct conjunction<B> : B { };
+template<class B, class... Bn>
+struct conjunction<B, Bn...>
+: std::conditional<static_cast<bool>(B::value), conjunction<Bn...>, B>::type {};
+
+// https://en.cppreference.com/w/cpp/types/negation
+template<class B> struct negation : std::integral_constant < bool, !B::value > { };
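+
+// Illustrative note (not part of the upstream header): conjunction
+// instantiates its arguments left to right and stops at the first false one,
+// e.g.
+//     static_assert(conjunction<std::is_integral<int>,
+//                               std::is_signed<int>>::value, "both hold");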
+
+// Reimplementation of is_constructible and is_default_constructible, due to them being broken for
+// std::pair and std::tuple until LWG 2367 fix (see https://cplusplus.github.io/LWG/lwg-defects.html#2367).
+// This causes compile errors in e.g. clang 3.5 or gcc 4.9.
+template<typename T>
+struct is_default_constructible : std::is_default_constructible<T> {};
+
+template<typename T1, typename T2>
+struct is_default_constructible<std::pair<T1, T2>>
+    : conjunction<is_default_constructible<T1>, is_default_constructible<T2>> {};
+
+template<typename T1, typename T2>
+struct is_default_constructible<const std::pair<T1, T2>>
+    : conjunction<is_default_constructible<T1>, is_default_constructible<T2>> {};
+
+template<typename... Ts>
+struct is_default_constructible<std::tuple<Ts...>>
+    : conjunction<is_default_constructible<Ts>...> {};
+
+template<typename... Ts>
+struct is_default_constructible<const std::tuple<Ts...>>
+    : conjunction<is_default_constructible<Ts>...> {};
+
+template<typename T, typename... Args>
+struct is_constructible : std::is_constructible<T, Args...> {};
+
+template<typename T1, typename T2>
+struct is_constructible<std::pair<T1, T2>> : is_default_constructible<std::pair<T1, T2>> {};
+
+template<typename T1, typename T2>
+struct is_constructible<const std::pair<T1, T2>> : is_default_constructible<const std::pair<T1, T2>> {};
+
+template<typename... Ts>
+struct is_constructible<std::tuple<Ts...>> : is_default_constructible<std::tuple<Ts...>> {};
+
+template<typename... Ts>
+struct is_constructible<const std::tuple<Ts...>> : is_default_constructible<const std::tuple<Ts...>> {};
+
+template<typename T, typename = void>
+struct is_iterator_traits : std::false_type {};
+
+template<typename T>
+struct is_iterator_traits<iterator_traits<T>>
+{
+  private:
+    using traits = iterator_traits<T>;
+
+  public:
+    static constexpr auto value =
+        is_detected<value_type_t, traits>::value &&
+        is_detected<difference_type_t, traits>::value &&
+        is_detected<pointer_t, traits>::value &&
+        is_detected<iterator_category_t, traits>::value &&
+        is_detected<reference_t, traits>::value;
+};
+
+template<typename T>
+struct is_range
+{
+  private:
+    using t_ref = typename std::add_lvalue_reference<T>::type;
+
+    using iterator = detected_t<result_of_begin, t_ref>;
+    using sentinel = detected_t<result_of_end, t_ref>;
+
+    // to be 100% correct, it should use https://en.cppreference.com/w/cpp/iterator/input_or_output_iterator
+    // and https://en.cppreference.com/w/cpp/iterator/sentinel_for
+    // but reimplementing these would be too much work, as a lot of other concepts are used underneath
+    static constexpr auto is_iterator_begin =
+        is_iterator_traits<iterator_traits<iterator>>::value;
+
+  public:
+    static constexpr bool value = !std::is_same<iterator, nonesuch>::value && !std::is_same<sentinel, nonesuch>::value && is_iterator_begin;
+};
+
+template<typename R>
+using iterator_t = enable_if_t<is_range<R>::value, result_of_begin<decltype(std::declval<R&>())>>;
+
+template<typename T>
+using range_value_t = value_type_t<iterator_traits<iterator_t<T>>>;
+
+// The following implementation of is_complete_type is taken from
+// https://blogs.msdn.microsoft.com/vcblog/2015/12/02/partial-support-for-expression-sfinae-in-vs-2015-update-1/
+// and is written by Xiang Fan who agreed to using it in this library.
+
+template<typename T, typename = void>
+struct is_complete_type : std::false_type {};
+
+template<typename T>
+struct is_complete_type<T, decltype(void(sizeof(T)))> : std::true_type {};
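+
+// Illustrative note (not part of the upstream header):
+// is_range<std::vector<int>>::value is true (begin/end are callable and
+// yield a real iterator), whereas is_range<int>::value is false, so
+// iterator_t<int> simply fails to form.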
+
+template<typename BasicJsonType, typename CompatibleObjectType,
+         typename = void>
+struct is_compatible_object_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename CompatibleObjectType>
+struct is_compatible_object_type_impl <
+    BasicJsonType, CompatibleObjectType,
+    enable_if_t < is_detected<mapped_type_t, CompatibleObjectType>::value&&
+    is_detected<key_type_t, CompatibleObjectType>::value >>
+{
+    using object_t = typename BasicJsonType::object_t;
+
+    // macOS's is_constructible does not play well with nonesuch...
+    static constexpr bool value =
+        is_constructible<typename object_t::key_type,
+        typename CompatibleObjectType::key_type>::value &&
+        is_constructible<typename object_t::mapped_type,
+        typename CompatibleObjectType::mapped_type>::value;
+};
+
+template<typename BasicJsonType, typename CompatibleObjectType>
+struct is_compatible_object_type
+    : is_compatible_object_type_impl<BasicJsonType, CompatibleObjectType> {};
+
+template<typename BasicJsonType, typename ConstructibleObjectType,
+         typename = void>
+struct is_constructible_object_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename ConstructibleObjectType>
+struct is_constructible_object_type_impl <
+    BasicJsonType, ConstructibleObjectType,
+    enable_if_t < is_detected<mapped_type_t, ConstructibleObjectType>::value&&
+    is_detected<key_type_t, ConstructibleObjectType>::value >>
+{
+    using object_t = typename BasicJsonType::object_t;
+
+    static constexpr bool value =
+        (is_default_constructible<ConstructibleObjectType>::value &&
+         (std::is_move_assignable<ConstructibleObjectType>::value ||
+          std::is_copy_assignable<ConstructibleObjectType>::value) &&
+         (is_constructible<typename ConstructibleObjectType::key_type,
+          typename object_t::key_type>::value &&
+          std::is_same <
+          typename object_t::mapped_type,
+          typename ConstructibleObjectType::mapped_type >::value)) ||
+        (has_from_json<BasicJsonType,
+         typename ConstructibleObjectType::mapped_type>::value ||
+         has_non_default_from_json <
+         BasicJsonType,
+         typename ConstructibleObjectType::mapped_type >::value);
+};
+
+template<typename BasicJsonType, typename ConstructibleObjectType>
+struct is_constructible_object_type
+    : is_constructible_object_type_impl<BasicJsonType,
+      ConstructibleObjectType> {};
+
+template<typename BasicJsonType, typename CompatibleStringType>
+struct is_compatible_string_type
+{
+    static constexpr auto value =
+        is_constructible<typename BasicJsonType::string_t, CompatibleStringType>::value;
+};
+
+template<typename BasicJsonType, typename ConstructibleStringType>
+struct is_constructible_string_type
+{
+    // launder type through decltype() to fix compilation failure on ICPC
+#ifdef __INTEL_COMPILER
+    using laundered_type = decltype(std::declval<ConstructibleStringType>());
+#else
+    using laundered_type = ConstructibleStringType;
+#endif
+
+    static constexpr auto value =
+        conjunction <
+        is_constructible<laundered_type, typename BasicJsonType::string_t>,
+        is_detected_exact<typename BasicJsonType::string_t::value_type,
+        value_type_t, laundered_type >>::value;
+};
+
+template<typename BasicJsonType, typename CompatibleArrayType, typename = void>
+struct is_compatible_array_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename CompatibleArrayType>
+struct is_compatible_array_type_impl <
+    BasicJsonType, CompatibleArrayType,
+    enable_if_t <
+    is_detected<iterator_t, CompatibleArrayType>::value&&
+    is_iterator_traits<iterator_traits<detected_t<iterator_t, CompatibleArrayType>>>::value&&
+// special case for types like std::filesystem::path whose iterator's value_type are themselves
+// c.f. https://github.com/nlohmann/json/pull/3073
+    !std::is_same<CompatibleArrayType, detected_t<range_value_t, CompatibleArrayType>>::value >>
+{
+    static constexpr bool value =
+        is_constructible<BasicJsonType,
+        range_value_t<CompatibleArrayType>>::value;
+};
+
+template<typename BasicJsonType, typename CompatibleArrayType>
+struct is_compatible_array_type
+    : is_compatible_array_type_impl<BasicJsonType, CompatibleArrayType> {};
+
+template<typename BasicJsonType, typename ConstructibleArrayType, typename = void>
+struct is_constructible_array_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type_impl <
+    BasicJsonType, ConstructibleArrayType,
+    enable_if_t<std::is_same<ConstructibleArrayType,
+    typename BasicJsonType::value_type>::value >>
+    : std::true_type {};
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type_impl <
+    BasicJsonType, ConstructibleArrayType,
+    enable_if_t < !std::is_same<ConstructibleArrayType,
+    typename BasicJsonType::value_type>::value&&
+    !is_compatible_string_type<BasicJsonType, ConstructibleArrayType>::value&&
+    is_default_constructible<ConstructibleArrayType>::value&&
+(std::is_move_assignable<ConstructibleArrayType>::value ||
+ std::is_copy_assignable<ConstructibleArrayType>::value)&&
+is_detected<iterator_t, ConstructibleArrayType>::value&&
+is_iterator_traits<iterator_traits<detected_t<iterator_t, ConstructibleArrayType>>>::value&&
+is_detected<range_value_t, ConstructibleArrayType>::value&&
+// special case for types like std::filesystem::path whose iterator's value_type are themselves
+// c.f. https://github.com/nlohmann/json/pull/3073
+!std::is_same<ConstructibleArrayType, detected_t<range_value_t, ConstructibleArrayType>>::value&&
+        is_complete_type <
+        detected_t<range_value_t, ConstructibleArrayType >>::value >>
+{
+    using value_type = range_value_t<ConstructibleArrayType>;
+
+    static constexpr bool value =
+        std::is_same<value_type,
+        typename BasicJsonType::array_t::value_type>::value ||
+        has_from_json<BasicJsonType,
+        value_type>::value ||
+        has_non_default_from_json <
+        BasicJsonType,
+        value_type >::value;
+};
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type
+    : is_constructible_array_type_impl<BasicJsonType, ConstructibleArrayType> {};
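+
+// Illustrative note (not part of the upstream header): with the trait defined
+// below, is_compatible_integer_type<std::int64_t, int>::value is true, while
+// is_compatible_integer_type<std::int64_t, unsigned int>::value is false
+// (signedness differs), and bool is excluded outright.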
+
+template<typename RealIntegerType, typename CompatibleNumberIntegerType,
+         typename = void>
+struct is_compatible_integer_type_impl : std::false_type {};
+
+template<typename RealIntegerType, typename CompatibleNumberIntegerType>
+struct is_compatible_integer_type_impl <
+    RealIntegerType, CompatibleNumberIntegerType,
+    enable_if_t < std::is_integral<RealIntegerType>::value&&
+    std::is_integral<CompatibleNumberIntegerType>::value&&
+    !std::is_same<bool, CompatibleNumberIntegerType>::value >>
+{
+    // is there an assert somewhere on overflows?
+    using RealLimits = std::numeric_limits<RealIntegerType>;
+    using CompatibleLimits = std::numeric_limits<CompatibleNumberIntegerType>;
+
+    static constexpr auto value =
+        is_constructible<RealIntegerType,
+        CompatibleNumberIntegerType>::value &&
+        CompatibleLimits::is_integer &&
+        RealLimits::is_signed == CompatibleLimits::is_signed;
+};
+
+template<typename RealIntegerType, typename CompatibleNumberIntegerType>
+struct is_compatible_integer_type
+    : is_compatible_integer_type_impl<RealIntegerType,
+      CompatibleNumberIntegerType> {};
+
+template<typename BasicJsonType, typename CompatibleType, typename = void>
+struct is_compatible_type_impl: std::false_type {};
+
+template<typename BasicJsonType, typename CompatibleType>
+struct is_compatible_type_impl <
+    BasicJsonType, CompatibleType,
+    enable_if_t<is_complete_type<CompatibleType>::value >>
+{
+    static constexpr bool value =
+        has_to_json<BasicJsonType, CompatibleType>::value;
+};
+
+template<typename BasicJsonType, typename CompatibleType>
+struct is_compatible_type
+    : is_compatible_type_impl<BasicJsonType, CompatibleType> {};
+
+template<typename T1, typename T2>
+struct is_constructible_tuple : std::false_type {};
+
+template<typename T1, typename... Args>
+struct is_constructible_tuple<T1, std::tuple<Args...>> : conjunction<is_constructible<T1, Args>...> {};
+
+template<typename BasicJsonType, typename T>
+struct is_json_iterator_of : std::false_type {};
+
+template<typename BasicJsonType>
+struct is_json_iterator_of<BasicJsonType, typename BasicJsonType::iterator> : std::true_type {};
+
+template<typename BasicJsonType>
+struct is_json_iterator_of<BasicJsonType, typename BasicJsonType::const_iterator> : std::true_type
+{};
+
+// checks if a given type T is a template specialization of Primary
+template<template <typename...> class Primary, typename T>