1
+ {
2
+ "cells" : [
3
+ {
4
+ "cell_type" : " code" ,
5
+ "source" : [],
6
+ "metadata" : {
7
+ "id" : " 4jVhBI-bTcHp"
8
+ },
9
+ "execution_count" : null ,
10
+ "outputs" : []
11
+ },
12
+ {
13
+ "cell_type" : " markdown" ,
14
+ "metadata" : {
15
+ "id" : " QmRAEtvJ_6OT"
16
+ },
17
+ "source" : [
18
+ " # **Deep Learning With Python - CHAPTER 14**"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type" : " markdown" ,
23
+ "metadata" : {
24
+ "id" : " 8HE2uMKK_7Qp"
25
+ },
26
+ "source" : [
27
+ " - This script provides a modular and well-structured implementation of various deep learning models using TensorFlow and Keras.\n " ,
28
+ " \n " ,
29
+ " - It defines separate classes for different model architectures, including **Dense-based**, **CNN-based**, **LSTM-based**, and **Transformer-based** models. Each class encapsulates the logic for building and compiling models with configurable parameters, making the code highly reusable and flexible.\n " ,
30
+ " \n " ,
31
+ " - The `DenseModel` class handles fully connected networks for classification and regression tasks, `CNNModel` builds convolutional networks for image processing, `LSTMModel` constructs recurrent networks for sequence data, and `TransformerModel` implements transformer-based models for NLP tasks.\n " ,
32
+ " \n " ,
33
+ " - This structured approach improves readability, maintainability, and scalability, making it easier to extend or modify individual components."
34
+ ]
35
+ },
36
+ {
37
+ "cell_type" : " code" ,
38
+ "execution_count" : 17 ,
39
+ "metadata" : {
40
+ "id" : " oqNg9frRCBgk"
41
+ },
42
+ "outputs" : [],
43
+ "source" : [
44
+ " import tensorflow as tf\n " ,
45
+ " from tensorflow import keras\n " ,
46
+ " from tensorflow.keras import layers"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type" : " code" ,
51
+ "execution_count" : 18 ,
52
+ "metadata" : {
53
+ "id" : " 7HHCQEtlu8Pg"
54
+ },
55
+ "outputs" : [],
56
+ "source" : [
57
class DenseModel:
    """Factory for fully connected (Dense) models covering the four
    classic task heads: binary classification, multi-class classification,
    multi-label classification, and regression.

    All four variants share the same trunk — an Input layer followed by
    two ReLU Dense layers of ``num_units`` each — and differ only in the
    output layer and loss, so the shared pieces live in private helpers.
    """

    def __init__(self, input_shape, num_units=32):
        # input_shape: number of input features (an int; the Input layer
        #   is created as shape=(input_shape,)).
        # num_units: width of each of the two hidden Dense layers.
        self.input_shape = input_shape
        self.num_units = num_units

    def _trunk(self):
        """Build the shared input + two-hidden-Dense-layer stack.

        Returns:
            (inputs, x): the keras Input tensor and the trunk's output tensor.
        """
        inputs = keras.Input(shape=(self.input_shape,))
        x = layers.Dense(self.num_units, activation="relu")(inputs)
        x = layers.Dense(self.num_units, activation="relu")(x)
        return inputs, x

    def _compile(self, inputs, outputs, loss):
        """Wrap the tensors in a Model and compile with rmsprop + given loss."""
        model = keras.Model(inputs, outputs)
        model.compile(optimizer="rmsprop", loss=loss)
        return model

    def build_binary_classification_model(self):
        """Single sigmoid output unit, trained with binary cross-entropy."""
        inputs, x = self._trunk()
        outputs = layers.Dense(1, activation="sigmoid")(x)
        return self._compile(inputs, outputs, "binary_crossentropy")

    def build_multiclass_classification_model(self, num_classes):
        """Softmax over ``num_classes`` units with categorical cross-entropy
        (expects one-hot encoded targets)."""
        inputs, x = self._trunk()
        outputs = layers.Dense(num_classes, activation="softmax")(x)
        return self._compile(inputs, outputs, "categorical_crossentropy")

    def build_multilabel_classification_model(self, num_classes):
        """Independent sigmoid per class with binary cross-entropy, i.e.
        each label is predicted independently (multi-label setting)."""
        inputs, x = self._trunk()
        outputs = layers.Dense(num_classes, activation="sigmoid")(x)
        return self._compile(inputs, outputs, "binary_crossentropy")

    def build_regression_model(self, num_values):
        """Linear output layer of ``num_values`` units with MSE loss."""
        inputs, x = self._trunk()
        outputs = layers.Dense(num_values)(x)
        return self._compile(inputs, outputs, "mse")
98
+ ]
99
+ },
100
+ {
101
+ "cell_type" : " code" ,
102
+ "source" : [
103
class CNNModel:
    """Factory for a compact separable-convolution image classifier.

    The network is three stages of paired SeparableConv2D layers; the
    first two stages end in 2x2 max pooling, and the last stage feeds a
    global average pool before the dense classification head.
    """

    def __init__(self, input_shape, num_classes):
        # input_shape: full image shape tuple, e.g. (height, width, channels).
        # num_classes: size of the softmax output layer.
        self.input_shape = input_shape
        self.num_classes = num_classes

    def build_model(self):
        """Assemble and compile the CNN (rmsprop, categorical cross-entropy)."""
        inputs = keras.Input(shape=self.input_shape)
        x = inputs
        # Each stage: two separable convs, optionally followed by max pooling.
        for filters_a, filters_b, pool in ((32, 64, True), (64, 128, True), (64, 128, False)):
            x = layers.SeparableConv2D(filters_a, 3, activation="relu")(x)
            x = layers.SeparableConv2D(filters_b, 3, activation="relu")(x)
            if pool:
                x = layers.MaxPooling2D(2)(x)
        x = layers.GlobalAveragePooling2D()(x)
        x = layers.Dense(32, activation="relu")(x)
        outputs = layers.Dense(self.num_classes, activation="softmax")(x)
        model = keras.Model(inputs, outputs)
        model.compile(optimizer="rmsprop", loss="categorical_crossentropy")
        return model
125
+ ],
126
+ "metadata" : {
127
+ "id" : " 6mitfoIAJY0r"
128
+ },
129
+ "execution_count" : 19 ,
130
+ "outputs" : []
131
+ },
132
+ {
133
+ "cell_type" : " code" ,
134
+ "source" : [
135
class LSTMModel:
    """Factory for recurrent (LSTM) sequence classifiers."""

    def __init__(self, input_shape, num_classes):
        # input_shape: (timesteps, features) shape of the input sequences.
        # num_classes: number of sigmoid output units.
        self.input_shape = input_shape
        self.num_classes = num_classes

    def build_single_layer_lstm(self):
        """One 32-unit LSTM followed by a sigmoid classification head;
        compiled with rmsprop and binary cross-entropy."""
        inputs = keras.Input(shape=self.input_shape)
        hidden = layers.LSTM(32)(inputs)
        outputs = layers.Dense(self.num_classes, activation="sigmoid")(hidden)
        model = keras.Model(inputs, outputs)
        model.compile(optimizer="rmsprop", loss="binary_crossentropy")
        return model

    def build_multi_layer_lstm(self):
        """Three stacked 32-unit LSTMs: the first two return full
        sequences so the next recurrent layer sees every timestep, and
        the last collapses to a single vector for the sigmoid head."""
        inputs = keras.Input(shape=self.input_shape)
        hidden = inputs
        for is_last in (False, False, True):
            hidden = layers.LSTM(32, return_sequences=not is_last)(hidden)
        outputs = layers.Dense(self.num_classes, activation="sigmoid")(hidden)
        model = keras.Model(inputs, outputs)
        model.compile(optimizer="rmsprop", loss="binary_crossentropy")
        return model
158
+ ],
159
+ "metadata" : {
160
+ "id" : " qr8jz4GeLVbT"
161
+ },
162
+ "execution_count" : 20 ,
163
+ "outputs" : []
164
+ },
165
+ {
166
+ "cell_type" : " code" ,
167
+ "source" : [
168
+ " class TransformerModel:\n " ,
169
+ " \"\"\" Class for creating Transformer-based models.\"\"\"\n " ,
170
+ " def __init__(self, sequence_length, vocab_size, embed_dim, dense_dim, num_heads):\n " ,
171
+ " self.sequence_length = sequence_length\n " ,
172
+ " self.vocab_size = vocab_size\n " ,
173
+ " self.embed_dim = embed_dim\n " ,
174
+ " self.dense_dim = dense_dim\n " ,
175
+ " self.num_heads = num_heads\n " ,
176
+ " \n " ,
177
+ " def build_encoder_decoder_model(self):\n " ,
178
+ " encoder_inputs = keras.Input(shape=(self.sequence_length,), dtype=\" int64\" )\n " ,
179
+ " x = PositionalEmbedding(self.sequence_length, self.vocab_size, self.embed_dim)(encoder_inputs)\n " ,
180
+ " encoder_outputs = TransformerEncoder(self.embed_dim, self.dense_dim, self.num_heads)(x)\n " ,
181
+ " \n " ,
182
+ " decoder_inputs = keras.Input(shape=(None,), dtype=\" int64\" )\n " ,
183
+ " x = PositionalEmbedding(self.sequence_length, self.vocab_size, self.embed_dim)(decoder_inputs)\n " ,
184
+ " x = TransformerDecoder(self.embed_dim, self.dense_dim, self.num_heads)(x, encoder_outputs)\n " ,
185
+ " decoder_outputs = layers.Dense(self.vocab_size, activation=\" softmax\" )(x)\n " ,
186
+ " \n " ,
187
+ " transformer = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)\n " ,
188
+ " transformer.compile(optimizer=\" rmsprop\" , loss=\" categorical_crossentropy\" )\n " ,
189
+ " return transformer\n " ,
190
+ " \n " ,
191
+ " def build_text_classification_model(self):\n " ,
192
+ " inputs = keras.Input(shape=(self.sequence_length,), dtype=\" int64\" )\n " ,
193
+ " x = PositionalEmbedding(self.sequence_length, self.vocab_size, self.embed_dim)(inputs)\n " ,
194
+ " x = TransformerEncoder(self.embed_dim, self.dense_dim, self.num_heads)(x)\n " ,
195
+ " x = layers.GlobalMaxPooling1D()(x)\n " ,
196
+ " outputs = layers.Dense(1, activation=\" sigmoid\" )(x)\n " ,
197
+ " model = keras.Model(inputs, outputs)\n " ,
198
+ " model.compile(optimizer=\" rmsprop\" , loss=\" binary_crossentropy\" )\n " ,
199
+ " return model"
200
+ ],
201
+ "metadata" : {
202
+ "id" : " 5KU0n2MpLXsb"
203
+ },
204
+ "execution_count" : 21 ,
205
+ "outputs" : []
206
+ },
207
+ {
208
+ "cell_type" : " code" ,
209
+ "source" : [
210
if __name__ == "__main__":
    # Smoke-test: instantiate every factory and build one model of each kind.
    num_input_features = 20
    num_classes = 5
    num_values = 1
    height, width, channels = 32, 32, 3
    num_timesteps, num_features = 10, 15
    sequence_length, vocab_size, embed_dim, dense_dim, num_heads = 100, 20000, 64, 256, 8

    # Dense heads: binary / multi-class / multi-label / regression.
    # (The multi-label variant was previously defined but never exercised.)
    dense_model = DenseModel(num_input_features)
    binary_model = dense_model.build_binary_classification_model()
    multi_class_model = dense_model.build_multiclass_classification_model(num_classes)
    multi_label_model = dense_model.build_multilabel_classification_model(num_classes)
    regression_model = dense_model.build_regression_model(num_values)

    cnn_model = CNNModel((height, width, channels), num_classes).build_model()

    lstm_model = LSTMModel((num_timesteps, num_features), num_classes)
    single_lstm = lstm_model.build_single_layer_lstm()
    multi_lstm = lstm_model.build_multi_layer_lstm()

    # NOTE(review): this call depends on PositionalEmbedding /
    # TransformerEncoder, which are not defined in this notebook — it
    # raises NameError until those custom layers are added.
    transformer = TransformerModel(sequence_length, vocab_size, embed_dim, dense_dim, num_heads)
    transformer_text_model = transformer.build_text_classification_model()
230
+ ],
231
+ "metadata" : {
232
+ "id" : " 2Wr8ixy2TnVw"
233
+ },
234
+ "execution_count" : null ,
235
+ "outputs" : []
236
+ }
237
+ ],
238
+ "metadata" : {
239
+ "colab" : {
240
+ "provenance" : []
241
+ },
242
+ "kernelspec" : {
243
+ "display_name" : " Python 3" ,
244
+ "name" : " python3"
245
+ },
246
+ "language_info" : {
247
+ "name" : " python"
248
+ }
249
+ },
250
+ "nbformat" : 4 ,
251
+ "nbformat_minor" : 0
252
+ }
0 commit comments