From 28bf3dd585f6e82d35bf3e4922b0871ebc43f2a3 Mon Sep 17 00:00:00 2001
From: Aleksandr Borzunov
Date: Wed, 24 Jul 2024 12:34:37 +0000
Subject: [PATCH 01/12] Fix layer_idx warning
---
src/petals/models/llama/block.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/petals/models/llama/block.py b/src/petals/models/llama/block.py
index 77f9a05f..1c89ad3f 100644
--- a/src/petals/models/llama/block.py
+++ b/src/petals/models/llama/block.py
@@ -132,7 +132,8 @@ class OptimizedLlamaDecoderLayer(LlamaDecoderLayer):
def __init__(self, config: LlamaConfig):
nn.Module.__init__(self)
self.hidden_size = config.hidden_size
- self.self_attn = OptimizedLlamaAttention(config=config)
+ self.self_attn = OptimizedLlamaAttention(config=config, layer_idx=0)
+ # layer_idx only matters for KV caching, and we re-implement it in Petals
self.mlp = LlamaMLP(config)
self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
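Note: recent `transformers` releases emit the layer_idx warning because the index is how an attention layer addresses its slot in the key/value cache. Below is a minimal sketch of that mechanism, assuming the `DynamicCache` API from transformers 4.36+; the tensor shapes are illustrative only, not Petals code.

```python
# Sketch: why transformers wants a layer_idx. The cache stores keys/values per
# layer index, so an attention module built without one cannot update it.
import torch
from transformers.cache_utils import DynamicCache

cache = DynamicCache()
layer_idx = 0  # Petals passes a fixed 0 because it re-implements KV caching itself

# One decoding step worth of states: (batch, num_heads, seq_len, head_dim)
key_states = torch.randn(1, 8, 1, 64)
value_states = torch.randn(1, 8, 1, 64)

# update() appends the new states under layer_idx and returns the cached slice
cached_keys, cached_values = cache.update(key_states, value_states, layer_idx)
print(cached_keys.shape)  # torch.Size([1, 8, 1, 64])
```

Since Petals keeps its own per-block cache (per the comment added in this patch), a fixed `layer_idx=0` is enough to silence the warning without affecting correctness.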
From 2c85a22c2de4f27cd615be6f5d581256bb760146 Mon Sep 17 00:00:00 2001
From: Aleksandr Borzunov
Date: Wed, 24 Jul 2024 12:34:51 +0000
Subject: [PATCH 02/12] Fix peft memory estimation
---
src/petals/utils/peft.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/petals/utils/peft.py b/src/petals/utils/peft.py
index 5d93ce6d..7817f73c 100644
--- a/src/petals/utils/peft.py
+++ b/src/petals/utils/peft.py
@@ -267,7 +267,7 @@ def estimate_adapter_memory_per_block(
**load_peft_kwargs,
) -> int:
"""Get the number of extra bytes used to store a set of adapters per given block"""
- with init_empty_weights(include_buffers=True):
+ with init_empty_weights(include_buffers=False):
block = get_model_block(block_config)
base_block_parameters = sum(p.numel() for p in block.parameters())
create_lora_adapter(block)
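For reference, the estimation only needs parameter counts, not real weights, which is why the block can stay on the meta device (`include_buffers` controls whether registered buffers are also created empty; with `False`, only parameters are). A simplified sketch of the same pattern with a stand-in block and adapter, where the toy classes and sizes are placeholders rather than the Petals `get_model_block` / `create_lora_adapter` code:

```python
# Sketch of the estimation pattern: build the block with empty (meta) weights,
# count parameters before and after adding a LoRA-style adapter, convert to bytes.
# The toy block and adapter below are placeholders, not the Petals classes.
import torch
from torch import nn
from accelerate import init_empty_weights


class ToyLoRALinear(nn.Module):
    """Wraps a linear layer and adds low-rank adapter weights (used only for counting)."""

    def __init__(self, base: nn.Linear, rank: int = 8):
        super().__init__()
        self.base = base
        self.lora_A = nn.Linear(base.in_features, rank, bias=False)
        self.lora_B = nn.Linear(rank, base.out_features, bias=False)


with init_empty_weights(include_buffers=False):  # parameters become meta, buffers stay real
    block = nn.Sequential(nn.Linear(4096, 11008), nn.Linear(11008, 4096))
    base_parameters = sum(p.numel() for p in block.parameters())
    block[0] = ToyLoRALinear(block[0])  # stand-in for create_lora_adapter(block)
    adapter_parameters = sum(p.numel() for p in block.parameters()) - base_parameters

bytes_per_param = torch.finfo(torch.float16).bits // 8  # assuming fp16 adapter weights
print(f"extra adapter memory per block: ~{adapter_parameters * bytes_per_param / 1e6:.1f} MB")
```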
From a65205dd5d2ab9f498884d599932ad98b07a3ad3 Mon Sep 17 00:00:00 2001
From: Aleksandr Borzunov
Date: Wed, 24 Jul 2024 12:35:01 +0000
Subject: [PATCH 03/12] Update license messages
---
src/petals/models/bloom/config.py | 2 +-
src/petals/models/llama/config.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/petals/models/bloom/config.py b/src/petals/models/bloom/config.py
index cda4cf71..d9b4f253 100644
--- a/src/petals/models/bloom/config.py
+++ b/src/petals/models/bloom/config.py
@@ -24,7 +24,7 @@ class DistributedBloomConfig(BloomConfig, ClientConfig, PTuneConfig, LMHeadConfi
def from_pretrained(
cls, model_name_or_path: Union[str, os.PathLike, None], *args, dht_prefix: Optional[str] = None, **kwargs
):
- logger.info("Make sure you follow the BLOOM's terms of use: https://bit.ly/bloom-license")
+ logger.info("Make sure you follow the BLOOM terms of use: https://bit.ly/bloom-license")
loading_from_repo = model_name_or_path is not None and not os.path.isdir(model_name_or_path)
if loading_from_repo and dht_prefix is None:
diff --git a/src/petals/models/llama/config.py b/src/petals/models/llama/config.py
index ae71a4c1..d61f5e85 100644
--- a/src/petals/models/llama/config.py
+++ b/src/petals/models/llama/config.py
@@ -27,8 +27,8 @@ def from_pretrained(
cls, model_name_or_path: Union[str, os.PathLike, None], *args, dht_prefix: Optional[str] = None, **kwargs
):
logger.info(
- "Make sure you follow the LLaMA's terms of use: "
- "https://bit.ly/llama2-license for LLaMA 2, https://bit.ly/llama-license for LLaMA 1"
+ "Make sure you follow the Llama terms of use: "
+ "https://llama.meta.com/llama3/license, https://llama.meta.com/llama2/license"
)
loading_from_repo = model_name_or_path is not None and not os.path.isdir(model_name_or_path)
From 5321af81da155c0cb7a5428a29438c33ca679865 Mon Sep 17 00:00:00 2001
From: Aleksandr Borzunov
Date: Wed, 24 Jul 2024 12:43:17 +0000
Subject: [PATCH 04/12] Remove excess load_state_dict() leading to meta tensor
warnings
---
src/petals/server/from_pretrained.py | 5 -----
1 file changed, 5 deletions(-)
diff --git a/src/petals/server/from_pretrained.py b/src/petals/server/from_pretrained.py
index 4a3b1507..ac6a23e0 100644
--- a/src/petals/server/from_pretrained.py
+++ b/src/petals/server/from_pretrained.py
@@ -64,10 +64,6 @@ def load_pretrained_block(
max_disk_space=max_disk_space,
)
- # dummy load, check that keys match
- report = block.load_state_dict(state_dict, strict=False)
- assert not report.missing_keys, f"Some block weights are missing: {report.missing_keys}"
-
for param_name, _ in block.named_parameters():
assert param_name in state_dict, f"{param_name} not in state dict"
param = state_dict[param_name]
@@ -76,7 +72,6 @@ def load_pretrained_block(
set_module_tensor_to_device(block, param_name, "cpu", value=param, dtype=param.dtype)
logger.info(f"Loaded {model_name} block {block_index}")
- logger.debug(f"Details: {report}")
return block
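The loop that remains materializes each parameter of the meta-initialized block straight from the checkpoint, so the separate `load_state_dict()` pass (and its meta-tensor warnings) added nothing. A self-contained sketch of that pattern with a toy module; the module and checkpoint here are illustrative, not the Petals block:

```python
# Sketch: materialize a meta-initialized module from a state dict, parameter by
# parameter, the way load_pretrained_block does after this patch.
import torch
from torch import nn
from accelerate import init_empty_weights
from accelerate.utils import set_module_tensor_to_device

with init_empty_weights():
    block = nn.Linear(16, 16)  # all parameters start on the meta device

state_dict = {"weight": torch.randn(16, 16), "bias": torch.zeros(16)}

for param_name, _ in block.named_parameters():
    assert param_name in state_dict, f"{param_name} not in state dict"
    param = state_dict[param_name]
    set_module_tensor_to_device(block, param_name, "cpu", value=param, dtype=param.dtype)

print(block.weight.device)  # cpu; no load_state_dict() call, hence no meta-tensor warnings
```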
From dc1a0f41dbd6ae4699cd0f667770fa27fb1dfd79 Mon Sep 17 00:00:00 2001
From: Aleksandr Borzunov
Date: Wed, 24 Jul 2024 12:45:47 +0000
Subject: [PATCH 05/12] Remove unused imports
---
src/petals/models/bloom/block.py | 2 +-
src/petals/models/llama/block.py | 1 -
src/petals/models/mixtral/block.py | 3 +--
3 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/src/petals/models/bloom/block.py b/src/petals/models/bloom/block.py
index 439b9ca1..01a74b21 100644
--- a/src/petals/models/bloom/block.py
+++ b/src/petals/models/bloom/block.py
@@ -7,7 +7,7 @@
import torch
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
-from transformers.models.bloom.modeling_bloom import BloomBlock, BloomModel, build_alibi_tensor
+from transformers.models.bloom.modeling_bloom import BloomBlock, build_alibi_tensor
from petals.utils.misc import is_dummy
diff --git a/src/petals/models/llama/block.py b/src/petals/models/llama/block.py
index 1c89ad3f..4ff9d3f6 100644
--- a/src/petals/models/llama/block.py
+++ b/src/petals/models/llama/block.py
@@ -15,7 +15,6 @@
LlamaConfig,
LlamaDecoderLayer,
LlamaMLP,
- LlamaModel,
LlamaRMSNorm,
repeat_kv,
rotate_half,
diff --git a/src/petals/models/mixtral/block.py b/src/petals/models/mixtral/block.py
index 7a2bd9fe..58acd144 100644
--- a/src/petals/models/mixtral/block.py
+++ b/src/petals/models/mixtral/block.py
@@ -1,4 +1,3 @@
-import json
from typing import Optional, Tuple
import torch
@@ -8,7 +7,7 @@
_prepare_4d_causal_attention_mask,
_prepare_4d_causal_attention_mask_for_sdpa,
)
-from transformers.models.mixtral.modeling_mixtral import MixtralDecoderLayer, MixtralModel
+from transformers.models.mixtral.modeling_mixtral import MixtralDecoderLayer
class WrappedMixtralBlock(MixtralDecoderLayer):
From 7195366d982b54aceb39f7dbe1c44a21eab29ec4 Mon Sep 17 00:00:00 2001
From: Alexander Borzunov
Date: Wed, 24 Jul 2024 05:57:15 -0700
Subject: [PATCH 06/12] Update README.md
---
README.md | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/README.md b/README.md
index c5e6872e..b387582f 100644
--- a/README.md
+++ b/README.md
@@ -8,16 +8,14 @@
-**Warning: Llama 3.1 support is still under construction!** the latest models require custom RoPE configuration that we do not have in Petals yet; we will update the code to fix that within a day.
-
-Generate text with distributed **Llama (1-3)** (70B), **Falcon** (40B+), **BLOOM** (176B) (or their derivatives), and fine-tune them for your own tasks — right from your desktop computer or Google Colab:
+Generate text with distributed **Llama 3.1** (up to 405B), **Mixtral** (8x7B), **Falcon** (40B+), or **BLOOM** (176B) and fine-tune them for your own tasks — right from your desktop computer or Google Colab:
```python
from transformers import AutoTokenizer
from petals import AutoDistributedModelForCausalLM
# Choose any model available at https://health.petals.dev
-model_name = "petals-team/StableBeluga2" # This one is fine-tuned Llama 2 (70B)
+model_name = "meta-llama/Meta-Llama-3.1-405B-Instruct"
# Connect to a distributed network hosting model layers
tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -33,9 +31,9 @@ print(tokenizer.decode(outputs[0])) # A cat sat on a mat...
🚀 Try now in Colab
-🔒 **Privacy.** Your data will be processed with the help of other people in the public swarm. Learn more about privacy [here](https://github.com/bigscience-workshop/petals/wiki/Security,-privacy,-and-AI-safety). For sensitive data, you can set up a [private swarm](https://github.com/bigscience-workshop/petals/wiki/Launch-your-own-swarm) among people you trust.
+🦙 **Want to run Llama 3?** Request access to its weights at the 🤗 [Model Hub](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct), then run `huggingface-cli login` in the terminal before loading the model. Or just try it in our [chatbot app](https://chat.petals.dev).
-🦙 **Want to run Llama 2?** Request access to its weights at the ♾️ [Meta AI website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and 🤗 [Model Hub](https://huggingface.co/meta-llama/Llama-2-70b-hf), then run `huggingface-cli login` in the terminal before loading the model. Or just try it in our [chatbot app](https://chat.petals.dev).
+🔒 **Privacy.** Your data will be processed with the help of other people in the public swarm. Learn more about privacy [here](https://github.com/bigscience-workshop/petals/wiki/Security,-privacy,-and-AI-safety). For sensitive data, you can set up a [private swarm](https://github.com/bigscience-workshop/petals/wiki/Launch-your-own-swarm) among people you trust.
💬 **Any questions?** Ping us in [our Discord](https://discord.gg/KdThf2bWVU)!
@@ -73,12 +71,12 @@ python3 -m petals.cli.run_server petals-team/StableBeluga2
📚 Learn more (how to use multiple GPUs, start the server on boot, etc.)
-💬 **Any questions?** Ping us in [our Discord](https://discord.gg/X7DgtxgMhc)!
-
-🦙 **Want to host Llama 2?** Request access to its weights at the ♾️ [Meta AI website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and 🤗 [Model Hub](https://huggingface.co/meta-llama/Llama-2-70b-hf), generate an 🔑 [access token](https://huggingface.co/settings/tokens), then add `--token YOUR_TOKEN_HERE` to the `python -m petals.cli.run_server` command.
+🦙 **Want to host Llama 3?** Request access to its weights at the 🤗 [Model Hub](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct), then run `huggingface-cli login` in the terminal before loading the model.
🔒 **Security.** Hosting a server does not allow others to run custom code on your computer. Learn more [here](https://github.com/bigscience-workshop/petals/wiki/Security,-privacy,-and-AI-safety).
+💬 **Any questions?** Ping us in [our Discord](https://discord.gg/X7DgtxgMhc)!
+
🏆 **Thank you!** Once you load and host 10+ blocks, we can show your name or link on the [swarm monitor](https://health.petals.dev) as a way to say thanks. You can specify them with `--public_name YOUR_NAME`.
## How does it work?
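Because the hunks above only show the edges of the README's client example, here is a sketch of how the full snippet reads after this patch. The lines not visible in the diff context are reconstructed and should be treated as illustrative (in particular the prompt and the `max_new_tokens` value), not as something this patch changes:

```python
from transformers import AutoTokenizer
from petals import AutoDistributedModelForCausalLM

# Choose any model available at https://health.petals.dev
model_name = "meta-llama/Meta-Llama-3.1-405B-Instruct"

# Connect to a distributed network hosting model layers
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoDistributedModelForCausalLM.from_pretrained(model_name)

# Run the model as if it were on your computer
inputs = tokenizer("A cat sat", return_tensors="pt")["input_ids"]
outputs = model.generate(inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0]))  # A cat sat on a mat...
```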
From 0a8dfd20982dc89b2918495190cd28d216c89643 Mon Sep 17 00:00:00 2001
From: Alexander Borzunov
Date: Wed, 24 Jul 2024 05:59:12 -0700
Subject: [PATCH 07/12] Update README.md
---
README.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/README.md b/README.md
index b387582f..0df3f826 100644
--- a/README.md
+++ b/README.md
@@ -39,14 +39,14 @@ print(tokenizer.decode(outputs[0])) # A cat sat on a mat...
## Connect your GPU and increase Petals capacity
-Petals is a community-run system — we rely on people sharing their GPUs. You can check out [available models](https://health.petals.dev) and help serving one of them! As an example, here is how to host a part of [Stable Beluga 2](https://huggingface.co/stabilityai/StableBeluga2) on your GPU:
+Petals is a community-run system — we rely on people sharing their GPUs. You can check out [available models](https://health.petals.dev) and help serving one of them! As an example, here is how to host a part of [Llama 3.1 (405B) Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) on your GPU:
🐧 **Linux + Anaconda.** Run these commands for NVIDIA GPUs (or follow [this](https://github.com/bigscience-workshop/petals/wiki/Running-on-AMD-GPU) for AMD):
```bash
conda install pytorch pytorch-cuda=11.7 -c pytorch -c nvidia
pip install git+https://github.com/bigscience-workshop/petals
-python -m petals.cli.run_server petals-team/StableBeluga2
+python -m petals.cli.run_server meta-llama/Meta-Llama-3.1-405B-Instruct
```
🪟 **Windows + WSL.** Follow [this guide](https://github.com/bigscience-workshop/petals/wiki/Run-Petals-server-on-Windows) on our Wiki.
@@ -56,7 +56,7 @@ python -m petals.cli.run_server petals-team/StableBeluga2
```bash
sudo docker run -p 31330:31330 --ipc host --gpus all --volume petals-cache:/cache --rm \
learningathome/petals:main \
- python -m petals.cli.run_server --port 31330 petals-team/StableBeluga2
+ python -m petals.cli.run_server --port 31330 meta-llama/Meta-Llama-3.1-405B-Instruct
```
🍏 **macOS + Apple M1/M2 GPU.** Install [Homebrew](https://brew.sh/), then run these commands:
@@ -64,7 +64,7 @@ sudo docker run -p 31330:31330 --ipc host --gpus all --volume petals-cache:/cach
```bash
brew install python
python3 -m pip install git+https://github.com/bigscience-workshop/petals
-python3 -m petals.cli.run_server petals-team/StableBeluga2
+python3 -m petals.cli.run_server meta-llama/Meta-Llama-3.1-405B-Instruct
```
From 6e4399b2f11b0ea6d26b83479d0e3e1af5ac16b3 Mon Sep 17 00:00:00 2001
From: Alexander Borzunov
Date: Wed, 24 Jul 2024 06:04:13 -0700
Subject: [PATCH 08/12] Update README.md
---
README.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/README.md b/README.md
index 0df3f826..3589e5a5 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ print(tokenizer.decode(outputs[0])) # A cat sat on a mat...
🚀 Try now in Colab
-🦙 **Want to run Llama 3?** Request access to its weights at the 🤗 [Model Hub](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct), then run `huggingface-cli login` in the terminal before loading the model. Or just try it in our [chatbot app](https://chat.petals.dev).
+🦙 **Want to run Llama?** [Request access](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) to its weights, then run `huggingface-cli login` in the terminal before loading the model. Or just try it in our [chatbot app](https://chat.petals.dev).
🔒 **Privacy.** Your data will be processed with the help of other people in the public swarm. Learn more about privacy [here](https://github.com/bigscience-workshop/petals/wiki/Security,-privacy,-and-AI-safety). For sensitive data, you can set up a [private swarm](https://github.com/bigscience-workshop/petals/wiki/Launch-your-own-swarm) among people you trust.
@@ -39,7 +39,9 @@ print(tokenizer.decode(outputs[0])) # A cat sat on a mat...
## Connect your GPU and increase Petals capacity
-Petals is a community-run system — we rely on people sharing their GPUs. You can check out [available models](https://health.petals.dev) and help serving one of them! As an example, here is how to host a part of [Llama 3.1 (405B) Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) on your GPU:
+Petals is a community-run system — we rely on people sharing their GPUs. You can help serving one of the [available models](https://health.petals.dev) or host a new model from 🤗 [Model Hub](https://huggingface.co/models)! As an example, here is how to host a part of [Llama 3.1 (405B) Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) on your GPU:
+
+🦙 **Want to host Llama?** [Request access](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) to its weights, then run `huggingface-cli login` in the terminal before loading the model.
🐧 **Linux + Anaconda.** Run these commands for NVIDIA GPUs (or follow [this](https://github.com/bigscience-workshop/petals/wiki/Running-on-AMD-GPU) for AMD):
@@ -71,8 +73,6 @@ python3 -m petals.cli.run_server meta-llama/Meta-Llama-3.1-405B-Instruct
📚 Learn more (how to use multiple GPUs, start the server on boot, etc.)
-🦙 **Want to host Llama 3?** Request access to its weights at the 🤗 [Model Hub](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct), then run `huggingface-cli login` in the terminal before loading the model.
-
🔒 **Security.** Hosting a server does not allow others to run custom code on your computer. Learn more [here](https://github.com/bigscience-workshop/petals/wiki/Security,-privacy,-and-AI-safety).
💬 **Any questions?** Ping us in [our Discord](https://discord.gg/X7DgtxgMhc)!
From 730126b021bfbbbcd50019bd3c2475fbbfa3892c Mon Sep 17 00:00:00 2001
From: Alexander Borzunov
Date: Wed, 24 Jul 2024 06:05:00 -0700
Subject: [PATCH 09/12] Update README.md
From 63636783f2bd460146fd58ec10ae9d4fdccd6596 Mon Sep 17 00:00:00 2001
From: Alexander Borzunov
Date: Wed, 24 Jul 2024 06:06:29 -0700
Subject: [PATCH 10/12] Update README.md
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 3589e5a5..15705370 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ print(tokenizer.decode(outputs[0])) # A cat sat on a mat...
## Connect your GPU and increase Petals capacity
-Petals is a community-run system — we rely on people sharing their GPUs. You can help serving one of the [available models](https://health.petals.dev) or host a new model from 🤗 [Model Hub](https://huggingface.co/models)! As an example, here is how to host a part of [Llama 3.1 (405B) Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) on your GPU:
+Petals is a community-run system — we rely on people sharing their GPUs. You can help serving one of the [available models](https://health.petals.dev) or host a new model from 🤗 [Model Hub](https://huggingface.co/models)! Here is how to host a part of [Llama 3.1 (405B) Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) on your GPU:
🦙 **Want to host Llama?** [Request access](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) to its weights, then run `huggingface-cli login` in the terminal before loading the model.
From 6232d44c7e1b95464265c91693b45dc7c7c8ae7c Mon Sep 17 00:00:00 2001
From: Alexander Borzunov
Date: Wed, 24 Jul 2024 06:07:46 -0700
Subject: [PATCH 11/12] Update README.md
---
README.md | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 15705370..4194b9e6 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,9 @@ print(tokenizer.decode(outputs[0])) # A cat sat on a mat...
## Connect your GPU and increase Petals capacity
-Petals is a community-run system — we rely on people sharing their GPUs. You can help serving one of the [available models](https://health.petals.dev) or host a new model from 🤗 [Model Hub](https://huggingface.co/models)! Here is how to host a part of [Llama 3.1 (405B) Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) on your GPU:
+Petals is a community-run system — we rely on people sharing their GPUs. You can help serving one of the [available models](https://health.petals.dev) or host a new model from 🤗 [Model Hub](https://huggingface.co/models)!
+
+As an example, here is how to host a part of [Llama 3.1 (405B) Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) on your GPU:
🦙 **Want to host Llama?** [Request access](https://huggingface.co/meta-llama/Meta-Llama-3.1-405B-Instruct) to its weights, then run `huggingface-cli login` in the terminal before loading the model.
From 837d3170fbf2931fa7aafe035abde5f4d059f382 Mon Sep 17 00:00:00 2001
From: Alexander Borzunov
Date: Wed, 24 Jul 2024 06:22:21 -0700
Subject: [PATCH 12/12] Update citations
---
README.md | 29 +++++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/README.md b/README.md
index 4194b9e6..8bd4b265 100644
--- a/README.md
+++ b/README.md
@@ -122,22 +122,39 @@ Please see **Section 3.3** of our [paper](https://arxiv.org/pdf/2209.01188.pdf).
Please see our [FAQ](https://github.com/bigscience-workshop/petals/wiki/FAQ:-Frequently-asked-questions#contributing) on contributing.
-### 📜 Citation
+### 📜 Citations
Alexander Borzunov, Dmitry Baranchuk, Tim Dettmers, Max Ryabinin, Younes Belkada, Artem Chumachenko, Pavel Samygin, and Colin Raffel.
[Petals: Collaborative Inference and Fine-tuning of Large Models.](https://arxiv.org/abs/2209.01188)
-_arXiv preprint arXiv:2209.01188,_ 2022.
+_Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)._ 2023.
```bibtex
-@article{borzunov2022petals,
+@inproceedings{borzunov2023petals,
title = {Petals: Collaborative Inference and Fine-tuning of Large Models},
- author = {Borzunov, Alexander and Baranchuk, Dmitry and Dettmers, Tim and Ryabinin, Max and Belkada, Younes and Chumachenko, Artem and Samygin, Pavel and Raffel, Colin},
- journal = {arXiv preprint arXiv:2209.01188},
- year = {2022},
+ author = {Borzunov, Alexander and Baranchuk, Dmitry and Dettmers, Tim and Riabinin, Maksim and Belkada, Younes and Chumachenko, Artem and Samygin, Pavel and Raffel, Colin},
+ booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
+ pages = {558--568},
+ year = {2023},
url = {https://arxiv.org/abs/2209.01188}
}
```
+Alexander Borzunov, Max Ryabinin, Artem Chumachenko, Dmitry Baranchuk, Tim Dettmers, Younes Belkada, Pavel Samygin, and Colin Raffel.
+[Distributed inference and fine-tuning of large language models over the Internet.](https://arxiv.org/abs/2312.08361)
+_Advances in Neural Information Processing Systems_ 36 (2024).
+
+```bibtex
+@inproceedings{borzunov2023distributed,
+ title = {Distributed inference and fine-tuning of large language models over the {I}nternet},
+ author = {Borzunov, Alexander and Ryabinin, Max and Chumachenko, Artem and Baranchuk, Dmitry and Dettmers, Tim and Belkada, Younes and Samygin, Pavel and Raffel, Colin},
+ booktitle = {Advances in Neural Information Processing Systems},
+ volume = {36},
+ pages = {12312--12331},
+ year = {2023},
+ url = {https://arxiv.org/abs/2312.08361}
+}
+```
+