forked from pytorch/torchtune
Chat dataset support for sarvam 1 with correct chat template (#11)
* sarvam 1 prompt template
* fix chat template
* links to sources
* support caching packed datasets and correct the save interval
* isort
* correct the cache save part

Co-authored-by: mohit_sarvam_ai <[email protected]>
Commit 6d89357 (1 parent: 70a2f91)
Showing 9 changed files with 340 additions and 58 deletions.
@@ -0,0 +1,84 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List

from torchtune.data import Message, PromptTemplateInterface

# Adapted from:
# https://github.com/pytorch/torchtune/blob/26b2200010a37474015925c5e3f4606435b72dd3/torchtune/models/llama2/_prompt_template.py


class Sarvam1ChatTemplate(PromptTemplateInterface):
    """
    Prompt template that formats chat data of human and system prompts with the
    appropriate tags used in Llama2 pre-training. Taken from Meta's official
    `Llama inference repository
    <https://github.com/meta-llama/llama/blob/main/llama/generation.py>`_.

    .. code-block:: text

        "[INST] <<SYS>>
        You are a helpful, respectful and honest assistant.
        <</SYS>>

        I am going to Paris, what should I see? [/INST] Paris, the capital of France, is known for its stunning architecture..."
    """

    template = {
        "system": ("<<SYS>>\n", "\n<</SYS>>\n\n"),
        "user": ("[INST] ", " [/INST] "),
        "assistant": ("", ""),
        "ipython": ("", ""),
    }

    def __call__(
        self,
        messages: List[Message],
    ) -> List[Message]:
        """
        Format user and system messages with appropriate tags.

        Args:
            messages (List[Message]): a single conversation, structured as a list
                of `Message` objects

        Returns:
            The formatted list of messages
        """
        system_message = []
        formatted_dialogue = []
        for message in messages:
            if message.role == "system":
                system_message = (
                    [{"type": "text", "content": self.template["system"][0]}]
                    + message.content
                    + [{"type": "text", "content": self.template["system"][1]}]
                )
                # Incorporate the system message in the user message - Llama2 only
                # looks for the <<SYS>> tags and not the explicit role, so this will
                # be treated the same as an actual system message. We do this because
                # of the nesting of the system prompt in the user message.
                continue
            elif message.role == "user":
                content = (
                    [{"type": "text", "content": self.template["user"][0]}]
                    + system_message
                    + message.content
                    + [{"type": "text", "content": self.template["user"][1]}]
                )
            elif message.role == "assistant":
                # No special formatting needed for assistant message
                content = message.content
            else:
                # ipython (tool) messages pass through unchanged; without this
                # branch, `content` would be unbound (or stale from an earlier
                # turn) for them.
                content = message.content
            formatted_dialogue.append(
                Message(
                    role=message.role,
                    content=content,
                    masked=message.masked,
                    ipython=message.ipython,
                    eot=message.eot,
                ),
            )
        return formatted_dialogue
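
For context, a minimal usage sketch of the template above (not part of the commit): the sample conversation is illustrative, and it assumes torchtune's `Message` accepts plain-string content and exposes a `text_content` property, as in current torchtune.

from torchtune.data import Message

messages = [
    Message(role="system", content="You are a helpful assistant."),
    Message(role="user", content="I am going to Paris, what should I see?"),
    Message(role="assistant", content="Paris, the capital of France, ..."),
]

formatted = Sarvam1ChatTemplate()(messages)

# The system prompt is folded into the first user turn, so the formatted
# dialogue has two messages instead of three. The user turn's text becomes:
#
#   [INST] <<SYS>>
#   You are a helpful assistant.
#   <</SYS>>
#
#   I am going to Paris, what should I see? [/INST]
for message in formatted:
    print(f"{message.role}: {message.text_content}")

Folding `<<SYS>>` into the user turn matches how Llama2-format models expect their input: the model keys off the tags in the text, not a separate system role.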