Merged

27 commits
62eaf03
Start landing code for Kaggle integration (#1320)
mattdangerw Nov 20, 2023
21fb04c
Switch byte pair tokenizer to save_assets/load_assets (#1322)
mattdangerw Nov 21, 2023
0e3c674
Convert SentencePieceTokenizer and associated models to new assets pa…
nkovela1 Nov 21, 2023
3619a6a
Add tests for Presets workflow, Add Metadata (#1326)
nkovela1 Nov 23, 2023
38806fd
Automatically add the keras framework to kaggle handles (#1331)
mattdangerw Nov 29, 2023
e0d34dc
Fix a failing byte pair tokenizer test (#1336)
mattdangerw Nov 30, 2023
0820d62
Use set comparison for assets (#1335)
mattdangerw Nov 30, 2023
c4b0c3c
Fix whisper tokenizer saving (#1334)
mattdangerw Nov 30, 2023
e3f8d06
Remove special case Bart from_preset (#1333)
mattdangerw Nov 30, 2023
dbb6487
Fix t5 tokenizer presets (#1339)
mattdangerw Nov 30, 2023
6130253
Script to convert presets (#1340)
mattdangerw Nov 30, 2023
814959b
Switch all presets to the new Kaggle format (#1338)
mattdangerw Dec 1, 2023
2aced24
Let kagglehub select latest version (#1342)
mattdangerw Dec 4, 2023
245b7e9
Use the proper title for example (#1346)
Philmod Dec 5, 2023
6ad8a30
Update conversion script (#1347)
mattdangerw Dec 6, 2023
7cc4323
Improve preset error messages (#1349)
mattdangerw Dec 7, 2023
9cc8110
Use subclass checking in check_preset_class (#1344)
mattdangerw Dec 7, 2023
4606f32
Add a hacky fix for TF 2.13 and 2.14 weights.h5 loading (#1353)
mattdangerw Dec 7, 2023
9cb5838
Another fix for saving on Keras 2 (#1354)
mattdangerw Dec 7, 2023
039ff45
Switch our presets to their final Kaggle location (#1345)
mattdangerw Dec 7, 2023
9cc3f84
Fix rebase issue in bytepair tokenizer (#1366)
nkovela1 Dec 12, 2023
6f7f9a0
Change encoding to utf-8 to fix Kaggle branch test failure for PyTorc…
sampathweb Dec 13, 2023
ddfca77
Fix GPU test issue with Keras 2 (#1368)
nkovela1 Dec 14, 2023
0e43f09
Add in-place modification of file keys for backwards compatibility (#…
nkovela1 Dec 15, 2023
4d84eb1
Add file renaming logic for modification (#1370)
nkovela1 Dec 16, 2023
29a0ae5
Fix task pre-processor in tasks (#1373)
sampathweb Dec 20, 2023
401e569
Backwards compatible fix for functional model saving (#1378)
mattdangerw Jan 4, 2024
Add tests for Presets workflow, Add Metadata (#1326)
* Add metadata and Albert preset utils test

* Add Bart bytepair preset workflow test

* Add BERT WordPiece preset workflow test

* Parameterize tests, switch to classifier, address comments

* Address comments and nits

* Fix formatting

* Add large test marker
nkovela1 authored and mattdangerw committed Jan 4, 2024
commit 3619a6a4c5746b1a6717e8341da6e59c90b61adf
4 changes: 2 additions & 2 deletions keras_nlp/tokenizers/sentence_piece_tokenizer.py
@@ -38,7 +38,7 @@
 tf_text = None


-VOCAB_FILENAME = "vocabulary.txt"
+VOCAB_FILENAME = "vocabulary.spm"


 @keras_nlp_export("keras_nlp.tokenizers.SentencePieceTokenizer")
@@ -132,7 +132,7 @@ def __init__(

     def save_assets(self, dir_path):
         path = os.path.join(dir_path, VOCAB_FILENAME)
-        with open(path, "w") as file:
+        with open(path, "wb") as file:
             file.write(self.proto)

     def load_assets(self, dir_path):
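Both edits above go together: the tokenizer asset is a binary serialized SentencePiece model, so it gets the conventional .spm extension and must be written in binary mode. A minimal sketch of why "w" fails here, with placeholder bytes standing in for a real serialized proto:

# A minimal sketch, assuming `proto` holds a serialized SentencePiece
# model (raw protobuf bytes), which is what `self.proto` stores.
proto = b"\n\x11placeholder-bytes"  # hypothetical stand-in, not a real model

try:
    with open("vocabulary.spm", "w") as file:
        file.write(proto)  # text mode rejects bytes
except TypeError as e:
    print(e)  # write() argument must be str, not bytes

with open("vocabulary.spm", "wb") as file:  # binary mode, as in the fix
    file.write(proto)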
23 changes: 15 additions & 8 deletions keras_nlp/utils/preset_utils.py
@@ -100,19 +100,26 @@ def save_to_preset(
     # Include references to weights and assets.
     config["assets"] = assets
     config["weights"] = weights_filename if save_weights else None
-    recursive_pop(config, "config_config")
+    recursive_pop(config, "compile_config")
+    recursive_pop(config, "build_config")
     with open(config_path, "w") as config_file:
         config_file.write(json.dumps(config, indent=4))

+    from keras_nlp import __version__ as keras_nlp_version
+
+    keras_version = keras.version() if hasattr(keras, "version") else None

     # Save any associated metadata.
-    metadata = {
-        # TODO: save keras version and keras-nlp version.
-        "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
-    }
-    metadata_path = os.path.join(preset, "metadata.json")
-    with open(metadata_path, "w") as metadata_file:
-        metadata_file.write(json.dumps(metadata, indent=4))
+    if config_filename == "config.json":
+        metadata = {
+            "keras_version": keras_version,
+            "keras_nlp_version": keras_nlp_version,
+            "parameter_count": layer.count_params(),
+            "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
+        }
+        metadata_path = os.path.join(preset, "metadata.json")
+        with open(metadata_path, "w") as metadata_file:
+            metadata_file.write(json.dumps(metadata, indent=4))


 def load_from_preset(
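For reference, a sketch of the metadata.json this block writes; the values below are illustrative, not taken from a real preset:

import datetime
import json

# Illustrative values only; at save time these come from keras.version()
# (None on Keras 2, which lacks a `version` function), keras_nlp.__version__,
# and layer.count_params().
metadata = {
    "keras_version": "3.0.1",
    "keras_nlp_version": "0.7.0",
    "parameter_count": 4385920,
    "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
}
print(json.dumps(metadata, indent=4))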
89 changes: 89 additions & 0 deletions keras_nlp/utils/preset_utils_test.py
@@ -0,0 +1,89 @@
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

import pytest
from absl.testing import parameterized

from keras_nlp.models import AlbertClassifier
from keras_nlp.models import BertClassifier
from keras_nlp.models import RobertaClassifier
from keras_nlp.tests.test_case import TestCase
from keras_nlp.utils import preset_utils


class PresetUtilsTest(TestCase):
    @parameterized.parameters(
        (AlbertClassifier, "albert_base_en_uncased", "sentencepiece"),
        (RobertaClassifier, "roberta_base_en", "bytepair"),
        (BertClassifier, "bert_tiny_en_uncased", "wordpiece"),
    )
    @pytest.mark.keras_3_only
    @pytest.mark.large
    def test_preset_saving(self, cls, preset_name, tokenizer_type):
        save_dir = self.get_temp_dir()
        model = cls.from_preset(preset_name, num_classes=2)
        preset_utils.save_to_preset(model, save_dir)

        if tokenizer_type == "bytepair":
            vocab_filename = "assets/tokenizer/vocabulary.json"
            expected_assets = [
                "assets/tokenizer/vocabulary.json",
                "assets/tokenizer/merges.txt",
            ]
        elif tokenizer_type == "sentencepiece":
            vocab_filename = "assets/tokenizer/vocabulary.spm"
            expected_assets = ["assets/tokenizer/vocabulary.spm"]
        else:
            vocab_filename = "assets/tokenizer/vocabulary.txt"
            expected_assets = ["assets/tokenizer/vocabulary.txt"]

        # Check existence of files
        self.assertTrue(os.path.exists(os.path.join(save_dir, vocab_filename)))
        self.assertTrue(os.path.exists(os.path.join(save_dir, "config.json")))
        self.assertTrue(
            os.path.exists(os.path.join(save_dir, "model.weights.h5"))
        )
        self.assertTrue(os.path.exists(os.path.join(save_dir, "metadata.json")))

        # Check the model config (`config.json`)
        config_json = open(os.path.join(save_dir, "config.json"), "r").read()
        self.assertTrue(
            "build_config" not in config_json
        )  # Test on raw json to include nested keys
        self.assertTrue(
            "compile_config" not in config_json
        )  # Test on raw json to include nested keys
        config = json.loads(config_json)
        self.assertAllEqual(config["assets"], expected_assets)
        self.assertEqual(config["weights"], "model.weights.h5")

        # Try loading the model from preset directory
        restored_model = preset_utils.load_from_preset(save_dir)

        train_data = (
            ["the quick brown fox.", "the slow brown fox."],  # Features.
        )
        model_input_data = model.preprocessor(*train_data)
        restored_model_input_data = restored_model.preprocessor(*train_data)

        # Check that saved vocab is equal to the original preset vocab
        self.assertAllClose(model_input_data, restored_model_input_data)

        # Check model outputs
        self.assertAllEqual(
            model(model_input_data), restored_model(restored_model_input_data)
        )
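Taken together, the test exercises a save/load round trip that looks roughly like the sketch below; the "./my_preset" path is hypothetical, and the example assumes the large "bert_tiny_en_uncased" preset can be downloaded:

from keras_nlp.models import BertClassifier
from keras_nlp.utils import preset_utils

# Build a classifier from a built-in preset, save it locally, reload it.
model = BertClassifier.from_preset("bert_tiny_en_uncased", num_classes=2)
preset_utils.save_to_preset(model, "./my_preset")
# ./my_preset now holds config.json, metadata.json, model.weights.h5,
# and assets/tokenizer/vocabulary.txt (WordPiece, in BERT's case).
restored = preset_utils.load_from_preset("./my_preset")

# The restored model should produce the same outputs as the original.
features = ["the quick brown fox."]
print(restored(restored.preprocessor(features)))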