Merged
Changes from 1 commit
27 commits
62eaf03
Start landing code for Kaggle integration (#1320)
mattdangerw Nov 20, 2023
21fb04c
Switch byte pair tokenizer to save_assets/load_assets (#1322)
mattdangerw Nov 21, 2023
0e3c674
Convert SentencePieceTokenizer and associated models to new assets pa…
nkovela1 Nov 21, 2023
3619a6a
Add tests for Presets workflow, Add Metadata (#1326)
nkovela1 Nov 23, 2023
38806fd
Automatically add the keras framework to kaggle handles (#1331)
mattdangerw Nov 29, 2023
e0d34dc
Fix a failing byte pair tokenizer test (#1336)
mattdangerw Nov 30, 2023
0820d62
Use set comparison for assets (#1335)
mattdangerw Nov 30, 2023
c4b0c3c
Fix whisper tokenizer saving (#1334)
mattdangerw Nov 30, 2023
e3f8d06
Remove special case Bart from_preset (#1333)
mattdangerw Nov 30, 2023
dbb6487
Fix t5 tokenizer presets (#1339)
mattdangerw Nov 30, 2023
6130253
Script to convert presets (#1340)
mattdangerw Nov 30, 2023
814959b
Switch all preset to the new Kaggle format (#1338)
mattdangerw Dec 1, 2023
2aced24
Let kagglehub select latest version (#1342)
mattdangerw Dec 4, 2023
245b7e9
Use the proper title for example (#1346)
Philmod Dec 5, 2023
6ad8a30
Update conversion script (#1347)
mattdangerw Dec 6, 2023
7cc4323
Improve preset error messages (#1349)
mattdangerw Dec 7, 2023
9cc8110
Use subclass checking check_preset_class (#1344)
mattdangerw Dec 7, 2023
4606f32
Add a hacky fix for TF 2.13 and 2.14 weights.h5 loading (#1353)
mattdangerw Dec 7, 2023
9cb5838
Another fix for saving on Keras 2 (#1354)
mattdangerw Dec 7, 2023
039ff45
Switch our preset to there final kaggle location (#1345)
mattdangerw Dec 7, 2023
9cc3f84
Fix rebase issue in bytepair tokenizer (#1366)
nkovela1 Dec 12, 2023
6f7f9a0
Change encoding to utf-8 to fix Kaggle branch test failure for PyTorc…
sampathweb Dec 13, 2023
ddfca77
Fix GPU test issue with Keras 2 (#1368)
nkovela1 Dec 14, 2023
0e43f09
Add in-place modification of file keys for backwards compatibility (#…
nkovela1 Dec 15, 2023
4d84eb1
Add file renaming logic for modification (#1370)
nkovela1 Dec 16, 2023
29a0ae5
Fix task pre-processor in tasks (#1373)
sampathweb Dec 20, 2023
401e569
Backwards compatible fix for functional model saving (#1378)
mattdangerw Jan 4, 2024
Start landing code for Kaggle integration (#1320)
* Demo preset directories

* Address comments

* Move packer and masker to build and debug tests

* Address comments

* Fix maskedLM preprocessor test

* Fix remaining tests

* Fix serialization tests on tf backend

---------

Co-authored-by: Neel Kovelamudi <[email protected]>
mattdangerw and nkovela1 committed Jan 4, 2024
commit 62eaf03d9eb5b4f60ee78e1350d2e016ccf34e5a
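For context, a minimal sketch of the user-facing workflow this commit starts to build toward. The built-in preset name is one that already ships with KerasNLP; the `kaggle://` handle format is only an assumption about what later commits in this PR wire up through kagglehub, not something shown in this diff.

```python
import keras_nlp

# Existing preset names continue to resolve through the legacy metadata
# path (`_legacy_from_preset` in the backbone diff below).
backbone = keras_nlp.models.BertBackbone.from_preset("bert_tiny_en_uncased")

# Later commits route Kaggle-hosted presets through the new
# `load_from_preset` utility instead; the handle below is hypothetical.
# backbone = keras_nlp.models.BertBackbone.from_preset(
#     "kaggle://keras/bert/keras/bert_tiny_en_uncased"
# )
```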
2 changes: 1 addition & 1 deletion keras_nlp/layers/modeling/token_and_position_embedding.py
@@ -122,7 +122,7 @@ def get_config(self):
),
"tie_weights": self.token_embedding.tie_weights,
"mask_zero": self.token_embedding.mask_zero,
},
}
)
return config

keras_nlp/layers/modeling/token_and_position_embedding_test.py
@@ -14,7 +14,6 @@

import numpy as np

from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.backend import random
from keras_nlp.layers.modeling.token_and_position_embedding import (
@@ -31,7 +30,7 @@ def test_layer_behaviors(self):
"vocabulary_size": 5,
"sequence_length": 4,
"embedding_dim": 3,
"embeddings_initializer": keras.initializers.Constant(1.0),
"embeddings_initializer": "ones",
},
input_data=random.randint(minval=0, maxval=5, shape=(2, 4)),
expected_output_shape=(2, 4, 3),
7 changes: 6 additions & 1 deletion keras_nlp/layers/preprocessing/preprocessing_layer.py
@@ -29,7 +29,12 @@ def __init__(self, **kwargs):
super().__init__(**kwargs)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.built = True
# Most preprocessing has no build.
if not hasattr(self, "build"):
self.built = True

def get_build_config(self):
return None

def __call__(self, *args, **kwargs):
# Always place on CPU for preprocessing, to avoid expensive back and
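The `get_build_config()` override and the `hasattr(self, "build")` check exist so that subclasses can defer creating tokenizer-dependent sub-layers to `build()`. A minimal sketch of that pattern, assuming the `PreprocessingLayer` base class above and the public `StartEndPacker` layer; the class, vocabulary handling, and values are illustrative, not part of this diff.

```python
import keras_nlp
from keras_nlp.layers.preprocessing.preprocessing_layer import (
    PreprocessingLayer,
)


class ToyPreprocessor(PreprocessingLayer):
    """Illustrative layer that defers packer creation to build()."""

    def __init__(self, tokenizer, sequence_length=8, **kwargs):
        super().__init__(**kwargs)
        self.tokenizer = tokenizer
        self.sequence_length = sequence_length
        self.packer = None  # Created in build(), once assets are loaded.

    def build(self, input_shape):
        # By the time Keras calls build(), any saved tokenizer assets have
        # been restored, so these special token ids are the restored ones.
        self.packer = keras_nlp.layers.StartEndPacker(
            start_value=self.tokenizer.token_to_id("[CLS]"),
            end_value=self.tokenizer.token_to_id("[SEP]"),
            pad_value=self.tokenizer.token_to_id("[PAD]"),
            sequence_length=self.sequence_length,
        )
        self.built = True

    def call(self, x):
        return self.packer(self.tokenizer(x))
```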
63 changes: 36 additions & 27 deletions keras_nlp/models/backbone.py
@@ -15,6 +15,8 @@
import os

from keras_nlp.backend import keras
from keras_nlp.utils.preset_utils import check_preset_class
from keras_nlp.utils.preset_utils import load_from_preset
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring

@@ -66,6 +68,31 @@ def from_config(cls, config):
def presets(cls):
return {}

@classmethod
def _legacy_from_preset(
cls,
preset,
load_weights=True,
**kwargs,
):
metadata = cls.presets[preset]
config = metadata["config"]
model = cls.from_config({**config, **kwargs})

if not load_weights:
return model

filename = os.path.basename(metadata["weights_url"])
weights = keras.utils.get_file(
filename,
metadata["weights_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["weights_hash"],
)

model.load_weights(weights)
return model

@classmethod
def from_preset(
cls,
@@ -94,35 +121,17 @@ def from_preset(
)
```
"""

if not cls.presets:
raise NotImplementedError(
"No presets have been created for this class."
)

if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]
config = metadata["config"]
model = cls.from_config({**config, **kwargs})

if not load_weights:
return model

filename = os.path.basename(metadata["weights_url"])
weights = keras.utils.get_file(
filename,
metadata["weights_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["weights_hash"],
# TODO: delete me!
if preset in cls.presets:
return cls._legacy_from_preset(preset, **kwargs)

check_preset_class(preset, cls)
return load_from_preset(
preset,
load_weights=load_weights,
config_overrides=kwargs,
)

model.load_weights(weights)
return model

def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to setup a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
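A hedged usage sketch of the two code paths `from_preset` now dispatches between: names still listed in `cls.presets` go through the legacy metadata dict, everything else is validated with `check_preset_class` and loaded with `load_from_preset`. The utility names come from this diff; the local directory path is hypothetical.

```python
from keras_nlp.models import BertBackbone
from keras_nlp.utils.preset_utils import check_preset_class
from keras_nlp.utils.preset_utils import load_from_preset

# Legacy path: the name is in BertBackbone.presets, so config and weights
# are fetched from the hard-coded metadata via `_legacy_from_preset`.
backbone = BertBackbone.from_preset("bert_tiny_en_uncased")

# New path: anything else is treated as a preset directory/handle.
# "./my_bert_preset" is a hypothetical directory written by the new
# preset-saving tooling added later in this PR.
check_preset_class("./my_bert_preset", BertBackbone)
backbone = load_from_preset("./my_bert_preset", load_weights=True)
```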
2 changes: 1 addition & 1 deletion keras_nlp/models/bert/bert_classifier_test.py
@@ -36,7 +36,7 @@ def setUp(self):
num_heads=2,
hidden_dim=2,
intermediate_dim=4,
max_sequence_length=self.preprocessor.packer.sequence_length,
max_sequence_length=self.preprocessor.sequence_length,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
53 changes: 31 additions & 22 deletions keras_nlp/models/bert/bert_masked_lm_preprocessor.py
@@ -134,33 +134,30 @@ def __init__(
truncate=truncate,
**kwargs,
)

self.mask_selection_rate = mask_selection_rate
self.mask_selection_length = mask_selection_length
self.mask_token_rate = mask_token_rate
self.random_token_rate = random_token_rate
self.masker = None

def build(self, input_shape):
super().build(input_shape)
# Defer masker creation to `build()` so that we can be sure tokenizer
# assets have loaded when restoring a saved model.
self.masker = MaskedLMMaskGenerator(
mask_selection_rate=mask_selection_rate,
mask_selection_length=mask_selection_length,
mask_token_rate=mask_token_rate,
random_token_rate=random_token_rate,
vocabulary_size=tokenizer.vocabulary_size(),
mask_token_id=tokenizer.mask_token_id,
mask_selection_rate=self.mask_selection_rate,
mask_selection_length=self.mask_selection_length,
mask_token_rate=self.mask_token_rate,
random_token_rate=self.random_token_rate,
vocabulary_size=self.tokenizer.vocabulary_size(),
mask_token_id=self.tokenizer.mask_token_id,
unselectable_token_ids=[
tokenizer.cls_token_id,
tokenizer.sep_token_id,
tokenizer.pad_token_id,
self.tokenizer.cls_token_id,
self.tokenizer.sep_token_id,
self.tokenizer.pad_token_id,
],
)

def get_config(self):
config = super().get_config()
config.update(
{
"mask_selection_rate": self.masker.mask_selection_rate,
"mask_selection_length": self.masker.mask_selection_length,
"mask_token_rate": self.masker.mask_token_rate,
"random_token_rate": self.masker.random_token_rate,
}
)
return config

def call(self, x, y=None, sample_weight=None):
if y is not None or sample_weight is not None:
logging.warning(
@@ -187,3 +184,15 @@ def call(self, x, y=None, sample_weight=None):
y = masker_outputs["mask_ids"]
sample_weight = masker_outputs["mask_weights"]
return pack_x_y_sample_weight(x, y, sample_weight)

def get_config(self):
config = super().get_config()
config.update(
{
"mask_selection_rate": self.mask_selection_rate,
"mask_selection_length": self.mask_selection_length,
"mask_token_rate": self.mask_token_rate,
"random_token_rate": self.random_token_rate,
}
)
return config
2 changes: 1 addition & 1 deletion keras_nlp/models/bert/bert_masked_lm_test.py
@@ -43,7 +43,7 @@ def setUp(self):
num_heads=2,
hidden_dim=2,
intermediate_dim=4,
max_sequence_length=self.preprocessor.packer.sequence_length,
max_sequence_length=self.preprocessor.sequence_length,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
26 changes: 17 additions & 9 deletions keras_nlp/models/bert/bert_preprocessor.py
@@ -139,24 +139,32 @@
):
super().__init__(**kwargs)
self.tokenizer = tokenizer
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.cls_token_id,
end_value=self.tokenizer.sep_token_id,
pad_value=self.tokenizer.pad_token_id,
truncate=truncate,
sequence_length=sequence_length,
)
self.sequence_length = sequence_length
self.truncate = truncate
self.packer = None

def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.packer.sequence_length,
"truncate": self.packer.truncate,
"sequence_length": self.sequence_length,
"truncate": self.truncate,
}
)
return config

def build(self, input_shape):
# Defer packer creation to `build()` so that we can be sure tokenizer
# assets have loaded when restoring a saved model.
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.cls_token_id,
end_value=self.tokenizer.sep_token_id,
pad_value=self.tokenizer.pad_token_id,
truncate=self.truncate,
sequence_length=self.sequence_length,
)
self.built = True

def call(self, x, y=None, sample_weight=None):
x = convert_inputs_to_list_of_tensor_segments(x)
x = [self.tokenizer(segment) for segment in x]
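A short sketch of the effect of the change above: the packer no longer exists after `__init__()`; it is created inside `build()` on first use, after any restored tokenizer assets (and hence the `[CLS]`/`[SEP]`/`[PAD]` ids) are in place. The toy vocabulary below is illustrative only.

```python
import keras_nlp

vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "the", "quick", "fox"]
tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab)
preprocessor = keras_nlp.models.BertPreprocessor(tokenizer, sequence_length=8)

assert preprocessor.packer is None   # Not created in __init__() anymore.
features = preprocessor(["the quick fox"])   # First call triggers build().
assert preprocessor.packer is not None
print(features["token_ids"])
```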
45 changes: 27 additions & 18 deletions keras_nlp/models/bert/bert_tokenizer.py
@@ -74,7 +74,7 @@ class BertTokenizer(WordPieceTokenizer):

def __init__(
self,
vocabulary,
vocabulary=None,
lowercase=False,
**kwargs,
):
@@ -84,23 +84,32 @@ def __init__(
**kwargs,
)

# Check for necessary special tokens.
cls_token = "[CLS]"
sep_token = "[SEP]"
pad_token = "[PAD]"
mask_token = "[MASK]"
for token in [cls_token, pad_token, sep_token]:
if token not in self.get_vocabulary():
raise ValueError(
f"Cannot find token `'{token}'` in the provided "
f"`vocabulary`. Please provide `'{token}'` in your "
"`vocabulary` or use a pretrained `vocabulary` name."
)

self.cls_token_id = self.token_to_id(cls_token)
self.sep_token_id = self.token_to_id(sep_token)
self.pad_token_id = self.token_to_id(pad_token)
self.mask_token_id = self.token_to_id(mask_token)
def set_vocabulary(self, vocabulary):
super().set_vocabulary(vocabulary)

if vocabulary is not None:
# Check for necessary special tokens.
cls_token = "[CLS]"
sep_token = "[SEP]"
pad_token = "[PAD]"
mask_token = "[MASK]"
for token in [cls_token, pad_token, sep_token]:
if token not in self.vocabulary:
raise ValueError(
f"Cannot find token `'{token}'` in the provided "
f"`vocabulary`. Please provide `'{token}'` in your "
"`vocabulary` or use a pretrained `vocabulary` name."
)

self.cls_token_id = self.token_to_id(cls_token)
self.sep_token_id = self.token_to_id(sep_token)
self.pad_token_id = self.token_to_id(pad_token)
self.mask_token_id = self.token_to_id(mask_token)
else:
self.cls_token_id = None
self.sep_token_id = None
self.pad_token_id = None
self.mask_token_id = None

@classproperty
def presets(cls):
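With `vocabulary` now optional, the tokenizer can be constructed empty and given its vocabulary later, which is how asset restoration is expected to work; the special token ids are resolved in `set_vocabulary()`. A small sketch under those assumptions, with an illustrative vocabulary:

```python
import keras_nlp

vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "the", "quick", "fox"]

# Construct without a vocabulary, as asset loading does, then attach one.
tokenizer = keras_nlp.models.BertTokenizer()
assert tokenizer.cls_token_id is None

tokenizer.set_vocabulary(vocab)
assert tokenizer.cls_token_id == vocab.index("[CLS]")
print(tokenizer("the quick fox"))
```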
@@ -42,7 +42,7 @@ def setUp(self):
num_heads=2,
hidden_dim=2,
intermediate_dim=4,
max_sequence_length=self.preprocessor.packer.sequence_length,
max_sequence_length=self.preprocessor.sequence_length,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
53 changes: 31 additions & 22 deletions keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor.py
@@ -136,33 +136,30 @@ def __init__(
truncate=truncate,
**kwargs,
)

self.mask_selection_rate = mask_selection_rate
self.mask_selection_length = mask_selection_length
self.mask_token_rate = mask_token_rate
self.random_token_rate = random_token_rate
self.masker = None

def build(self, input_shape):
super().build(input_shape)
# Defer masker creation to `build()` so that we can be sure tokenizer
# assets have loaded when restoring a saved model.
self.masker = MaskedLMMaskGenerator(
mask_selection_rate=mask_selection_rate,
mask_selection_length=mask_selection_length,
mask_token_rate=mask_token_rate,
random_token_rate=random_token_rate,
vocabulary_size=tokenizer.vocabulary_size(),
mask_token_id=tokenizer.mask_token_id,
mask_selection_rate=self.mask_selection_rate,
mask_selection_length=self.mask_selection_length,
mask_token_rate=self.mask_token_rate,
random_token_rate=self.random_token_rate,
vocabulary_size=self.tokenizer.vocabulary_size(),
mask_token_id=self.tokenizer.mask_token_id,
unselectable_token_ids=[
tokenizer.cls_token_id,
tokenizer.sep_token_id,
tokenizer.pad_token_id,
self.tokenizer.cls_token_id,
self.tokenizer.sep_token_id,
self.tokenizer.pad_token_id,
],
)

def get_config(self):
config = super().get_config()
config.update(
{
"mask_selection_rate": self.masker.mask_selection_rate,
"mask_selection_length": self.masker.mask_selection_length,
"mask_token_rate": self.masker.mask_token_rate,
"random_token_rate": self.masker.random_token_rate,
}
)
return config

def call(self, x, y=None, sample_weight=None):
if y is not None or sample_weight is not None:
logging.warning(
@@ -183,3 +180,15 @@ def call(self, x, y=None, sample_weight=None):
y = masker_outputs["mask_ids"]
sample_weight = masker_outputs["mask_weights"]
return pack_x_y_sample_weight(x, y, sample_weight)

def get_config(self):
config = super().get_config()
config.update(
{
"mask_selection_rate": self.mask_selection_rate,
"mask_selection_length": self.mask_selection_length,
"mask_token_rate": self.mask_token_rate,
"random_token_rate": self.random_token_rate,
}
)
return config