Closed

Changes from 1 commit · 36 commits
a089a8b  Add VGG16 backbone (#1737) · divyashreepathihalli · Aug 8, 2024
73b7bad  Add `ResNetBackbone` and `ResNetImageClassifier` (#1765) · james77777778 · Aug 12, 2024
26afc7e  Add CSP DarkNet backbone and classifier (#1774) · sachinprasadhs · Aug 15, 2024
00ab4d5  Add `FeaturePyramidBackbone` and port weights from `timm` for `ResNet… · james77777778 · Aug 15, 2024
9860756  Add DenseNet (#1775) · sachinprasadhs · Aug 16, 2024
ececd14  Merge remote-tracking branch 'upstream/master' into keras-hub · divyashreepathihalli · Aug 16, 2024
fd6f977  Add ViTDetBackbone (#1776) · divyashreepathihalli · Aug 20, 2024
fc485d6  Add Mix transformer (#1780) · sachinprasadhs · Aug 20, 2024
2797851  update input_image_shape -> image_shape (#1785) · divyashreepathihalli · Aug 21, 2024
18f8880  Create __init__.py (#1788) · sachinprasadhs · Aug 22, 2024
2ee893c  Hack package build script to rename to keras-hub (#1793) · mattdangerw · Aug 26, 2024
fdf6b6b  Add CLIP and T5XXL for StableDiffusionV3 (#1790) · james77777778 · Aug 26, 2024
18dddf4  Add DeepLabV3Plus segmentation · sachinprasadhs · Aug 26, 2024
744b233  init file · sachinprasadhs · Aug 26, 2024
98c0811  api gen · sachinprasadhs · Aug 26, 2024
b40617c  Add Segmentation base class · sachinprasadhs · Aug 26, 2024
7470b84  format fix · sachinprasadhs · Aug 27, 2024
68a5a62  add dependency package · sachinprasadhs · Aug 27, 2024
8473170  nit · sachinprasadhs · Aug 28, 2024
beae2f4  Add Bounding Box Utils (#1791) · sineeli · Aug 28, 2024
9289ab7  mobilenet_v3 added in keras-nlp (#1782) · ushareng · Aug 28, 2024
09f470f  Pkgoogle/efficient net migration (#1778) · pkgoogle · Aug 28, 2024
be8888d  Add the ResNet_vd backbone (#1766) · gowthamkpr · Aug 28, 2024
536474a  Add `VAEImageDecoder` for StableDiffusionV3 (#1796) · james77777778 · Aug 28, 2024
0fbd84b  Replace `Backbone` with `keras.Model` in `CLIPTextEncoder` and `T5XXL… · james77777778 · Aug 28, 2024
9143468  Add pyramid output for densenet, cspDarknet (#1801) · sachinprasadhs · Sep 3, 2024
791d7f6  Add `MMDiT` for StableDiffusionV3 (#1806) · james77777778 · Sep 4, 2024
339669f  Add remaining bbox utils (#1804) · sineeli · Sep 4, 2024
0a978d2  Merge remote-tracking branch 'upstream/keras-hub' into segmentation · sachinprasadhs · Sep 4, 2024
f31ad9c  Add Deeplabv3 and v3plus in the same backbone and segmenter · sachinprasadhs · Sep 17, 2024
2d82550  Merge 'upstream/keras-hub' into segmentation · sachinprasadhs · Sep 17, 2024
fc1a3a5  fix imports · sachinprasadhs · Sep 17, 2024
c172031  nit · sachinprasadhs · Sep 17, 2024
3b6c045  testcase changes · sachinprasadhs · Sep 17, 2024
704d119  Segmeter >> ImageSegmenter · sachinprasadhs · Sep 17, 2024
64050d5  resolve conflict · sachinprasadhs · Sep 17, 2024
Merge 'upstream/keras-hub' into segmentation
sachinprasadhs committed Sep 17, 2024
commit 2d82550b5960f8e28f121f6e470a3ea2b02223cc
23 changes: 9 additions & 14 deletions keras_nlp/api/models/__init__.py
@@ -65,7 +65,7 @@
)
from keras_nlp.src.models.bloom.bloom_tokenizer import BloomTokenizer
from keras_nlp.src.models.causal_lm import CausalLM
from keras_nlp.src.models.classifier import Classifier
from keras_nlp.src.models.causal_lm_preprocessor import CausalLMPreprocessor
from keras_nlp.src.models.csp_darknet.csp_darknet_backbone import (
    CSPDarkNetBackbone,
)
@@ -96,16 +96,16 @@
from keras_nlp.src.models.deberta_v3.deberta_v3_tokenizer import (
    DebertaV3Tokenizer,
)
from keras_nlp.src.models.densenet.densenet_backbone import DenseNetBackbone
from keras_nlp.src.models.densenet.densenet_image_classifier import (
    DenseNetImageClassifier,
)
from keras_nlp.src.models.deeplab_v3.deeplab_v3_backbone import (
    DeepLabV3Backbone,
)
from keras_nlp.src.models.deeplab_v3.deeplab_v3_segmenter import (
    DeepLabV3ImageSegmenter,
)
from keras_nlp.src.models.densenet.densenet_backbone import DenseNetBackbone
from keras_nlp.src.models.densenet.densenet_image_classifier import (
    DenseNetImageClassifier,
)
from keras_nlp.src.models.distil_bert.distil_bert_backbone import (
    DistilBertBackbone,
)
@@ -134,10 +134,6 @@
    EfficientNetBackbone,
)
from keras_nlp.src.models.electra.electra_backbone import ElectraBackbone
from keras_nlp.src.models.electra.electra_preprocessor import (
    ElectraPreprocessor,
)
from keras_nlp.src.models.electra.electra_backbone import ElectraBackbone
from keras_nlp.src.models.electra.electra_tokenizer import ElectraTokenizer
from keras_nlp.src.models.f_net.f_net_backbone import FNetBackbone
from keras_nlp.src.models.f_net.f_net_masked_lm import FNetMaskedLM
@@ -266,11 +262,10 @@
from keras_nlp.src.models.t5.t5_backbone import T5Backbone
from keras_nlp.src.models.t5.t5_tokenizer import T5Tokenizer
from keras_nlp.src.models.task import Task
from keras_nlp.src.models.vgg.vgg_backbone import VGGBackbone
from keras_nlp.src.models.vgg.vgg_image_classifier import VGGImageClassifier
from keras_nlp.src.models.vit_det.vit_det_backbone import ViTDetBackbone
from keras_nlp.src.models.whisper.whisper_audio_feature_extractor import (
    WhisperAudioFeatureExtractor,
from keras_nlp.src.models.text_classifier import TextClassifier
from keras_nlp.src.models.text_classifier import TextClassifier as Classifier
from keras_nlp.src.models.text_classifier_preprocessor import (
    TextClassifierPreprocessor,
)
from keras_nlp.src.models.vgg.vgg_backbone import VGGBackbone
from keras_nlp.src.models.vgg.vgg_image_classifier import VGGImageClassifier
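The headline API change in this file is the rename of `Classifier` to `TextClassifier`, kept import-compatible via an alias. A minimal sketch of that pattern, with a placeholder class body standing in for the real task model:

```python
# Backward-compatible rename: the class moves to its new name and
# the old name stays importable as a module-level alias.
# Placeholder body; the real class lives in keras_nlp.src.models.
class TextClassifier:
    """Task class formerly exported as `Classifier`."""


Classifier = TextClassifier  # legacy alias, same object

assert Classifier is TextClassifier
```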
13 changes: 9 additions & 4 deletions keras_nlp/src/bounding_box/utils_test.py
@@ -32,15 +32,19 @@ def test_clip_to_image_standard(self):
            bounding_boxes, bounding_box_format="xyxy", images=image
        )
        boxes = bounding_boxes["boxes"]
        self.assertAllGreaterEqual(boxes, 0)
        self.assertAllGreaterEqual(ops.convert_to_numpy(boxes), 0)
        (
            x1,
            y1,
            x2,
            y2,
        ) = ops.split(boxes, 4, axis=1)
        self.assertAllLessEqual(ops.concatenate([x1, x2], axis=1), width)
        self.assertAllLessEqual(ops.concatenate([y1, y2], axis=1), height)
        self.assertAllLessEqual(
            ops.convert_to_numpy(ops.concatenate([x1, x2], axis=1)), width
        )
        self.assertAllLessEqual(
            ops.convert_to_numpy(ops.concatenate([y1, y2], axis=1)), height
        )
        # Test relative format batched
        image = ops.ones(shape=(1, height, width, 3))

@@ -51,7 +55,8 @@ def test_clip_to_image_standard(self):
        bounding_boxes = utils.clip_to_image(
            bounding_boxes, bounding_box_format="rel_xyxy", images=image
        )
        self.assertAllLessEqual(bounding_boxes["boxes"], 1)
        boxes = bounding_boxes["boxes"]
        self.assertAllLessEqual(ops.convert_to_numpy(boxes), 1)

    def test_clip_to_image_filters_fully_out_bounding_boxes(self):
        # Test xyxy format unbatched
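The recurring fix in this test is wrapping backend tensors in `ops.convert_to_numpy` before numeric assertions, since under Keras 3 the tensors may be torch, JAX, or TensorFlow objects that NumPy-based test helpers cannot compare directly. A standalone sketch of the idea, with illustrative values:

```python
import numpy as np
from keras import ops

# Backend-agnostic tensor (torch, jax, or tf depending on KERAS_BACKEND).
boxes = ops.ones((2, 4))

# Convert once, then assert with plain NumPy semantics.
boxes_np = ops.convert_to_numpy(boxes)
assert isinstance(boxes_np, np.ndarray)
assert (boxes_np >= 0).all()
```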
14 changes: 3 additions & 11 deletions keras_nlp/src/models/backbone.py
@@ -25,8 +25,6 @@
from keras_nlp.src.utils.preset_utils import save_metadata
from keras_nlp.src.utils.preset_utils import save_serialized_object
from keras_nlp.src.utils.python_utils import classproperty
from keras_nlp.src.utils.timm.convert import load_timm_backbone
from keras_nlp.src.utils.transformers.convert import load_transformers_backbone


@keras_nlp_export("keras_nlp.models.Backbone")
@@ -193,15 +191,9 @@ class like `keras_nlp.models.Backbone.from_preset()`, or from
        )
        ```
        """
        format = check_format(preset)

        if format == "transformers":
            return load_transformers_backbone(cls, preset, load_weights)
        elif format == "timm":
            return load_timm_backbone(cls, preset, load_weights, **kwargs)

        preset_cls = check_config_class(preset)
        if not issubclass(preset_cls, cls):
        loader = get_preset_loader(preset)
        backbone_cls = loader.check_backbone_class()
        if not issubclass(backbone_cls, cls):
            raise ValueError(
                f"Saved preset has type `{backbone_cls.__name__}` which is not "
                f"a subclass of calling class `{cls.__name__}`. Call "
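This refactor replaces per-format branching (`check_format`, `load_timm_backbone`, `load_transformers_backbone`) with a single preset loader that owns format detection. A hedged sketch of the new control flow; `load_backbone` is assumed here as the loader method that finishes the job, since the condensed diff does not show it:

```python
# Sketch of the loader-based dispatch, assuming a PresetLoader with
# check_backbone_class() and a load_backbone() method (assumed name).
from keras_nlp.src.utils.preset_utils import get_preset_loader


def from_preset(cls, preset, load_weights=True, **kwargs):
    loader = get_preset_loader(preset)  # format detection lives here
    backbone_cls = loader.check_backbone_class()
    if not issubclass(backbone_cls, cls):
        raise ValueError(
            f"Saved preset has type `{backbone_cls.__name__}` which is "
            f"not a subclass of calling class `{cls.__name__}`."
        )
    return loader.load_backbone(backbone_cls, load_weights, **kwargs)
```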
19 changes: 4 additions & 15 deletions keras_nlp/src/models/stable_diffusion_v3/clip_preprocessor.py
@@ -18,9 +18,7 @@
from keras_nlp.src.models.stable_diffusion_v3.clip_tokenizer import (
    CLIPTokenizer,
)
from keras_nlp.src.utils.keras_utils import (
    convert_inputs_to_list_of_tensor_segments,
)
from keras_nlp.src.utils.tensor_utils import preprocessing_function

try:
    import tensorflow as tf
@@ -65,22 +63,13 @@ def build(self, input_shape):
        )
        self.built = True

    # TODO: Use `@tf_preprocessing_function` after rebasing.
    @preprocessing_function
    def call(self, x, y=None, sample_weight=None, sequence_length=None):
        x = convert_inputs_to_list_of_tensor_segments(x)
        if len(x) != 1:
            raise ValueError(
                "T5XXL requires each input feature to contain only "
                f"one segment, but received {len(x)}. If you are using T5XXL"
                " for a multi-segment classification task, please refer to "
                "classification models like BERT or RoBERTa."
            )
        if self.to_lower:
            x = tf.strings.lower(x)
        sequence_length = sequence_length or self.sequence_length
        token_ids, padding_mask = self.packer(
            self.tokenizer(x[0]),
            sequence_length=sequence_length,
            self.tokenizer(x),
            sequence_length=sequence_length or self.sequence_length,
            add_start_value=self.add_start_token,
            add_end_value=self.add_end_token,
        )
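With the multi-segment check gone, the preprocessor reduces to tokenize-then-pack. A rough standalone sketch of the packing step using `StartEndPacker`; the token id values are hypothetical stand-ins for CLIP's special tokens:

```python
from keras_nlp.src.layers.preprocessing.start_end_packer import StartEndPacker

# Pack a tokenized sequence to a fixed length with start/end tokens.
packer = StartEndPacker(
    sequence_length=77,
    start_value=49406,  # hypothetical start-of-text id
    end_value=49407,    # hypothetical end-of-text id
    pad_value=49407,
    return_padding_mask=True,
)
token_ids, padding_mask = packer([101, 102, 103], sequence_length=77)
```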
18 changes: 4 additions & 14 deletions keras_nlp/src/models/stable_diffusion_v3/t5_xxl_preprocessor.py
@@ -16,9 +16,7 @@
from keras_nlp.src.layers.preprocessing.start_end_packer import StartEndPacker
from keras_nlp.src.models.preprocessor import Preprocessor
from keras_nlp.src.models.t5.t5_tokenizer import T5Tokenizer
from keras_nlp.src.utils.keras_utils import (
    convert_inputs_to_list_of_tensor_segments,
)
from keras_nlp.src.utils.tensor_utils import preprocessing_function


class T5XXLPreprocessor(Preprocessor):
@@ -50,19 +48,11 @@ def build(self, input_shape):
        )
        self.built = True

    @preprocessing_function
    def call(self, x, y=None, sample_weight=None, sequence_length=None):
        x = convert_inputs_to_list_of_tensor_segments(x)
        if len(x) != 1:
            raise ValueError(
                "T5XXL requires each input feature to contain only "
                f"one segment, but received {len(x)}. If you are using T5XXL"
                " for a multi-segment classification task, please refer to "
                "classification models like BERT or RoBERTa."
            )
        sequence_length = sequence_length or self.sequence_length
        token_ids, padding_mask = self.packer(
            self.tokenizer(x[0]),
            sequence_length=sequence_length,
            self.tokenizer(x),
            sequence_length=sequence_length or self.sequence_length,
            add_start_value=self.add_start_token,
            add_end_value=self.add_end_token,
        )
30 changes: 1 addition & 29 deletions keras_nlp/src/utils/preset_utils.py
@@ -571,35 +571,7 @@ def load_json(preset, config_file=CONFIG_FILE):
    return config


def check_format(preset):
    if check_file_exists(preset, SAFETENSOR_FILE) or check_file_exists(
        preset, SAFETENSOR_CONFIG_FILE
    ):
        # Determine the format by parsing the config file.
        config = load_config(preset, HF_CONFIG_FILE)
        if "hf://timm" in preset or "architecture" in config:
            return "timm"
        return "transformers"

    if not check_file_exists(preset, METADATA_FILE):
        raise FileNotFoundError(
            f"The preset directory `{preset}` doesn't have a file named `{METADATA_FILE}`, "
            "or you do not have access to it. This file is required to load a Keras model "
            "preset. Please verify that the model you are trying to load is a Keras model."
        )
    metadata = load_config(preset, METADATA_FILE)
    if "keras_version" not in metadata:
        raise ValueError(
            f"`{METADATA_FILE}` in the preset directory `{preset}` doesn't have `keras_version`. "
            "Please verify that the model you are trying to load is a Keras model."
        )
    return "keras"


def load_serialized_object(preset, config_file=CONFIG_FILE, **kwargs):
    kwargs = kwargs or {}
    config = load_config(preset, config_file)

def load_serialized_object(config, **kwargs):
    # `dtype` in config might be a serialized `DTypePolicy` or `DTypePolicyMap`.
    # Ensure that `dtype` is properly configured.
    dtype = kwargs.pop("dtype", None)
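The new `load_serialized_object(config, **kwargs)` signature means callers read the JSON config once and pass the dict in, rather than having the helper re-load it from disk. A minimal sketch of the new call shape, with a hypothetical preset handle:

```python
from keras_nlp.src.utils.preset_utils import CONFIG_FILE
from keras_nlp.src.utils.preset_utils import load_json
from keras_nlp.src.utils.preset_utils import load_serialized_object

# Read the config once, then build the object from the dict; a `dtype`
# kwarg overrides the serialized policy, per the handling in the diff.
config = load_json("hf://keras/resnet_50_imagenet", CONFIG_FILE)  # hypothetical preset
backbone = load_serialized_object(config, dtype="bfloat16")
```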
25 changes: 7 additions & 18 deletions keras_nlp/src/utils/timm/convert_resnet.py
@@ -13,10 +13,9 @@
# limitations under the License.
import numpy as np

from keras_nlp.src.utils.preset_utils import HF_CONFIG_FILE
from keras_nlp.src.utils.preset_utils import jax_memory_cleanup
from keras_nlp.src.utils.preset_utils import load_config
from keras_nlp.src.utils.transformers.safetensor_utils import SafetensorLoader
from keras_nlp.src.models.resnet.resnet_backbone import ResNetBackbone

backbone_cls = ResNetBackbone


def convert_backbone_config(timm_config):
@@ -56,6 +55,8 @@ def convert_backbone_config(timm_config):
        stackwise_num_strides=[1, 2, 2, 2],
        block_type=block_type,
        use_pre_activation=use_pre_activation,
        input_conv_filters=[64],
        input_conv_kernel_sizes=[7],
    )


@@ -100,10 +101,10 @@ def port_batch_normalization(keras_layer_name, hf_weight_prefix):
    for stack_index in range(num_stacks):
        for block_idx in range(backbone.stackwise_num_blocks[stack_index]):
            if version == "v1":
                keras_name = f"v1_stack{stack_index}_block{block_idx}"
                keras_name = f"stack{stack_index}_block{block_idx}"
                hf_name = f"layer{stack_index+1}.{block_idx}"
            else:
                keras_name = f"v2_stack{stack_index}_block{block_idx}"
                keras_name = f"stack{stack_index}_block{block_idx}"
                hf_name = f"stages.{stack_index}.blocks.{block_idx}"

            if version == "v1":
@@ -157,15 +158,3 @@ def port_batch_normalization(keras_layer_name, hf_weight_prefix):
    normalization_layer.input_mean = mean
    normalization_layer.input_variance = [s**2 for s in std]
    normalization_layer.build(normalization_layer._build_input_shape)


def load_resnet_backbone(cls, preset, load_weights, **kwargs):
    timm_config = load_config(preset, HF_CONFIG_FILE)
    keras_config = convert_backbone_config(timm_config)
    backbone = cls(**keras_config, **kwargs)
    if load_weights:
        jax_memory_cleanup(backbone)
        # Use prefix="" to avoid using `get_prefixed_key`.
        with SafetensorLoader(preset, prefix="") as loader:
            convert_weights(backbone, loader, timm_config)
    return backbone
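One detail worth noting in the normalization port: timm configs publish per-channel std, while `keras.layers.Normalization` expects variance, hence the `s**2`. A hedged sketch using the usual ImageNet statistics:

```python
from keras import layers

# timm-style per-channel stats (standard ImageNet values).
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

# Normalization takes variance, not std, so square each channel's std.
norm = layers.Normalization(mean=mean, variance=[s**2 for s in std])
norm.build((None, None, None, 3))  # NHWC image inputs
```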