Merged

Changes from 1 commit

27 commits
62eaf03
Start landing code for Kaggle integration (#1320)
mattdangerw Nov 20, 2023
21fb04c
Switch byte pair tokenizer to save_assets/load_assets (#1322)
mattdangerw Nov 21, 2023
0e3c674
Convert SentencePieceTokenizer and associated models to new assets pa…
nkovela1 Nov 21, 2023
3619a6a
Add tests for Presets workflow, Add Metadata (#1326)
nkovela1 Nov 23, 2023
38806fd
Automatically add the keras framework to kaggle handles (#1331)
mattdangerw Nov 29, 2023
e0d34dc
Fix a failing byte pair tokenizer test (#1336)
mattdangerw Nov 30, 2023
0820d62
Use set comparison for assets (#1335)
mattdangerw Nov 30, 2023
c4b0c3c
Fix whisper tokenizer saving (#1334)
mattdangerw Nov 30, 2023
e3f8d06
Remove special case Bart from_preset (#1333)
mattdangerw Nov 30, 2023
dbb6487
Fix t5 tokenizer presets (#1339)
mattdangerw Nov 30, 2023
6130253
Script to convert presets (#1340)
mattdangerw Nov 30, 2023
814959b
Switch all preset to the new Kaggle format (#1338)
mattdangerw Dec 1, 2023
2aced24
Let kagglehub select latest version (#1342)
mattdangerw Dec 4, 2023
245b7e9
Use the proper title for example (#1346)
Philmod Dec 5, 2023
6ad8a30
Update conversion script (#1347)
mattdangerw Dec 6, 2023
7cc4323
Improve preset error messages (#1349)
mattdangerw Dec 7, 2023
9cc8110
Use subclass checking check_preset_class (#1344)
mattdangerw Dec 7, 2023
4606f32
Add a hacky fix for TF 2.13 and 2.14 weights.h5 loading (#1353)
mattdangerw Dec 7, 2023
9cb5838
Another fix for saving on Keras 2 (#1354)
mattdangerw Dec 7, 2023
039ff45
Switch our preset to there final kaggle location (#1345)
mattdangerw Dec 7, 2023
9cc3f84
Fix rebase issue in bytepair tokenizer (#1366)
nkovela1 Dec 12, 2023
6f7f9a0
Change encoding to utf-8 to fix Kaggle branch test failure for PyTorc…
sampathweb Dec 13, 2023
ddfca77
Fix GPU test issue with Keras 2 (#1368)
nkovela1 Dec 14, 2023
0e43f09
Add in-place modification of file keys for backwards compatibility (#…
nkovela1 Dec 15, 2023
4d84eb1
Add file renaming logic for modification (#1370)
nkovela1 Dec 16, 2023
29a0ae5
Fix task pre-processor in tasks (#1373)
sampathweb Dec 20, 2023
401e569
Backwards compatible fix for functional model saving (#1378)
mattdangerw Jan 4, 2024
Switch all preset to the new Kaggle format (#1338)
These are not uploaded to Kaggle just yet, but will be shortly.
mattdangerw committed Jan 4, 2024
commit 814959b15756d1e18cfe4699d2e043c30f8ab139
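
At a high level, this commit collapses each preset entry from an inline architecture config plus per-file GCS URLs and hashes down to a single `kaggle_handle`. An abridged before/after sketch of one entry (elided fields marked with `...`), taken from the albert diff below:

    # Before: architecture config and download URLs spelled out per preset.
    "albert_base_en_uncased": {
        "metadata": {...},
        "config": {"vocabulary_size": 30000, "num_layers": 12, ...},
        "preprocessor_config": {},
        "weights_url": "https://storage.googleapis.com/keras-nlp/models/albert_base_en_uncased/v1/model.h5",
        "weights_hash": "b83ccf3418dd84adc569324183176813",
        "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_base_en_uncased/v1/vocab.spm",
        "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5",
    },

    # After: one handle; config, weights, and tokenizer assets ship with
    # the preset itself in the new Kaggle format.
    "albert_base_en_uncased": {
        "metadata": {...},
        "kaggle_handle": "gs://keras-nlp-kaggle/albert_base_en_uncased",
    },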
76 changes: 4 additions & 72 deletions keras_nlp/models/albert/albert_presets.py
@@ -26,24 +26,7 @@
             "path": "albert",
             "model_card": "https://github.com/google-research/albert/blob/master/README.md",
         },
-        "config": {
-            "vocabulary_size": 30000,
-            "num_layers": 12,
-            "num_heads": 12,
-            "num_groups": 1,
-            "num_inner_repetitions": 1,
-            "embedding_dim": 128,
-            "hidden_dim": 768,
-            "intermediate_dim": 3072,
-            "dropout": 0.0,
-            "max_sequence_length": 512,
-            "num_segments": 2,
-        },
-        "preprocessor_config": {},
-        "weights_url": "https://storage.googleapis.com/keras-nlp/models/albert_base_en_uncased/v1/model.h5",
-        "weights_hash": "b83ccf3418dd84adc569324183176813",
-        "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_base_en_uncased/v1/vocab.spm",
-        "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5",
+        "kaggle_handle": "gs://keras-nlp-kaggle/albert_base_en_uncased",
     },
     "albert_large_en_uncased": {
         "metadata": {
@@ -56,24 +39,7 @@
             "path": "albert",
             "model_card": "https://github.com/google-research/albert/blob/master/README.md",
         },
-        "config": {
-            "vocabulary_size": 30000,
-            "num_layers": 24,
-            "num_heads": 16,
-            "num_groups": 1,
-            "num_inner_repetitions": 1,
-            "embedding_dim": 128,
-            "hidden_dim": 1024,
-            "intermediate_dim": 4096,
-            "dropout": 0,
-            "max_sequence_length": 512,
-            "num_segments": 2,
-        },
-        "preprocessor_config": {},
-        "weights_url": "https://storage.googleapis.com/keras-nlp/models/albert_large_en_uncased/v1/model.h5",
-        "weights_hash": "c7754804efb245f06dd6e7ced32e082c",
-        "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_large_en_uncased/v1/vocab.spm",
-        "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5",
+        "kaggle_handle": "gs://keras-nlp-kaggle/albert_large_en_uncased",
     },
     "albert_extra_large_en_uncased": {
         "metadata": {
@@ -86,24 +52,7 @@
             "path": "albert",
             "model_card": "https://github.com/google-research/albert/blob/master/README.md",
         },
-        "config": {
-            "vocabulary_size": 30000,
-            "num_layers": 24,
-            "num_heads": 16,
-            "num_groups": 1,
-            "num_inner_repetitions": 1,
-            "embedding_dim": 128,
-            "hidden_dim": 2048,
-            "intermediate_dim": 8192,
-            "dropout": 0,
-            "max_sequence_length": 512,
-            "num_segments": 2,
-        },
-        "preprocessor_config": {},
-        "weights_url": "https://storage.googleapis.com/keras-nlp/models/albert_extra_large_en_uncased/v1/model.h5",
-        "weights_hash": "713209be8aadfa614fd79f18c9aeb16d",
-        "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_extra_large_en_uncased/v1/vocab.spm",
-        "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5",
+        "kaggle_handle": "gs://keras-nlp-kaggle/albert_extra_large_en_uncased",
     },
     "albert_extra_extra_large_en_uncased": {
         "metadata": {
@@ -116,23 +65,6 @@
             "path": "albert",
             "model_card": "https://github.com/google-research/albert/blob/master/README.md",
         },
-        "config": {
-            "vocabulary_size": 30000,
-            "num_layers": 12,
-            "num_heads": 64,
-            "num_groups": 1,
-            "num_inner_repetitions": 1,
-            "embedding_dim": 128,
-            "hidden_dim": 4096,
-            "intermediate_dim": 16384,
-            "dropout": 0,
-            "max_sequence_length": 512,
-            "num_segments": 2,
-        },
-        "preprocessor_config": {},
-        "weights_url": "https://storage.googleapis.com/keras-nlp/models/albert_extra_extra_large_en_uncased/v1/model.h5",
-        "weights_hash": "a835177b692fb6a82139f94c66db2f22",
-        "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_extra_extra_large_en_uncased/v1/vocab.spm",
-        "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5",
+        "kaggle_handle": "gs://keras-nlp-kaggle/albert_extra_extra_large_en_uncased",
     },
 }
32 changes: 3 additions & 29 deletions keras_nlp/models/backbone.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
-
 from keras_nlp.backend import keras
 from keras_nlp.utils.preset_utils import check_preset_class
 from keras_nlp.utils.preset_utils import load_from_preset
@@ -68,31 +66,6 @@ def from_config(cls, config):
     def presets(cls):
         return {}

-    @classmethod
-    def _legacy_from_preset(
-        cls,
-        preset,
-        load_weights=True,
-        **kwargs,
-    ):
-        metadata = cls.presets[preset]
-        config = metadata["config"]
-        model = cls.from_config({**config, **kwargs})
-
-        if not load_weights:
-            return model
-
-        filename = os.path.basename(metadata["weights_url"])
-        weights = keras.utils.get_file(
-            filename,
-            metadata["weights_url"],
-            cache_subdir=os.path.join("models", preset),
-            file_hash=metadata["weights_hash"],
-        )
-
-        model.load_weights(weights)
-        return model
-
     @classmethod
     def from_preset(
         cls,
@@ -121,9 +94,10 @@ def from_preset(
         )
         ```
         """
-        # TODO: delete me!
+        # We support short IDs for official presets, e.g. `"bert_base_en"`.
+        # Map these to a Kaggle Models handle.
         if preset in cls.presets:
-            return cls._legacy_from_preset(preset, **kwargs)
+            preset = cls.presets[preset]["kaggle_handle"]

         check_preset_class(preset, cls)
         return load_from_preset(
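
For users, the visible effect of the backbone.py change is that `from_preset` now rewrites a short preset ID to its `kaggle_handle` before handing off to `load_from_preset`. A minimal usage sketch via the public `keras_nlp.models` API; whether a handle can also be passed directly depends on `preset_utils`, which is not shown in this diff:

    import keras_nlp

    # A short ID is found in cls.presets and rewritten to its kaggle_handle.
    backbone = keras_nlp.models.AlbertBackbone.from_preset("albert_base_en_uncased")

    # The handle itself (here the GCS staging location from this commit)
    # would skip the lookup and go straight to load_from_preset.
    backbone = keras_nlp.models.AlbertBackbone.from_preset(
        "gs://keras-nlp-kaggle/albert_base_en_uncased"
    )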
33 changes: 3 additions & 30 deletions keras_nlp/models/bart/bart_presets.py
@@ -25,22 +25,7 @@
             "path": "bart",
             "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md",
         },
-        "config": {
-            "vocabulary_size": 50265,
-            "num_layers": 6,
-            "num_heads": 12,
-            "hidden_dim": 768,
-            "intermediate_dim": 3072,
-            "dropout": 0.1,
-            "max_sequence_length": 1024,
-        },
-        "preprocessor_config": {},
-        "weights_url": "https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/model.h5",
-        "weights_hash": "5b59403f0cafafbd89680e0785791163",
-        "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/vocab.json",
-        "vocabulary_hash": "be4d3c6f3f5495426b2c03b334334354",
-        "merges_url": "https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/merges.txt",
-        "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e",
+        "kaggle_handle": "gs://keras-nlp-kaggle/bart_base_en",
     },
     "bart_large_en": {
         "metadata": {
@@ -62,13 +47,7 @@
             "dropout": 0.1,
             "max_sequence_length": 1024,
         },
-        "preprocessor_config": {},
-        "weights_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en/v1/model.h5",
-        "weights_hash": "6bfe7e591af8c5699ce6f9f18753af9a",
-        "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en/v1/vocab.json",
-        "vocabulary_hash": "cf410ee085c5c69c957bb1f6d8456596",
-        "merges_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en/v1/merges.txt",
-        "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e",
+        "kaggle_handle": "gs://keras-nlp-kaggle/bart_large_en",
     },
     "bart_large_en_cnn": {
         "metadata": {
@@ -90,12 +69,6 @@
             "dropout": 0.1,
             "max_sequence_length": 1024,
         },
-        "preprocessor_config": {},
-        "weights_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en_cnn/v1/model.h5",
-        "weights_hash": "99782ecd9365956f016096fef9afd62c",
-        "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en_cnn/v1/vocab.json",
-        "vocabulary_hash": "be4d3c6f3f5495426b2c03b334334354",
-        "merges_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en_cnn/v1/merges.txt",
-        "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e",
+        "kaggle_handle": "gs://keras-nlp-kaggle/bart_large_en_cnn",
     },
 }
4 changes: 2 additions & 2 deletions keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py
@@ -127,8 +127,8 @@ class BartSeq2SeqLMPreprocessor(BartPreprocessor):
     def __init__(
         self,
         tokenizer,
-        encoder_sequence_length,
-        decoder_sequence_length,
+        encoder_sequence_length=1024,
+        decoder_sequence_length=1024,
         **kwargs
     ):
         # Since we truncate the last token from `decoder_token_ids`, we need to
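
The preprocessor change above gives `encoder_sequence_length` and `decoder_sequence_length` defaults of 1024 (BART's maximum sequence length, matching the configs removed from bart_presets.py), so they no longer have to be supplied explicitly. A short sketch, assuming `from_preset` forwards keyword arguments to the constructor as elsewhere in the library:

    from keras_nlp.models import BartSeq2SeqLMPreprocessor

    # Sequence lengths now default to 1024 and can simply be omitted...
    preprocessor = BartSeq2SeqLMPreprocessor.from_preset("bart_base_en")

    # ...or overridden per task.
    preprocessor = BartSeq2SeqLMPreprocessor.from_preset(
        "bart_base_en", decoder_sequence_length=128
    )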