import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

num_tags = 12  # Number of unique issue tags
num_words = 10000  # Size of vocabulary obtained when preprocessing text data
num_departments = 4  # Number of departments for predictions
title_input = keras.Input(shape=(None,), name='title')  # Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name='body')  # Variable-length sequence of ints
tags_input = keras.Input(shape=(num_tags,), name='tags')  # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the body into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name='priority')(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name='department')(x)
# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(inputs=[title_input, body_input, tags_input],
                    outputs=[priority_pred, department_pred])
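Because the outputs are named, the model can be compiled with a dict of per-output losses keyed by those names, and trained on dicts of arrays keyed by the input names. Below is a minimal training sketch; the randomly generated arrays, sequence lengths, and loss weights are illustrative placeholders rather than values from a real issue-tracker dataset.

import numpy as np

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={
        # `from_logits=True` because neither output layer has an activation
        'priority': keras.losses.BinaryCrossentropy(from_logits=True),
        'department': keras.losses.CategoricalCrossentropy(from_logits=True),
    },
    loss_weights={'priority': 1.0, 'department': 0.2},
)

# Dummy input data: random token ids and random binary tag vectors
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype('float32')

# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))

# Inputs and targets are both passed as dicts keyed by the layer names
model.fit(
    {'title': title_data, 'body': body_data, 'tags': tags_data},
    {'priority': priority_targets, 'department': dept_targets},
    epochs=2,
    batch_size=32,
)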
inputs = keras.Input(shape=(32, 32, 3), name='img')
x = layers.Conv2D(32, 3, activation='relu')(inputs)
x = layers.Conv2D(64, 3, activation='relu')(x)
block_1_output = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_2_output = layers.add([x, block_1_output])
x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_2_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_3_output = layers.add([x, block_2_output])
x = layers.Conv2D(64, 3, activation='relu')(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs, outputs, name='toy_resnet')
model.summary()
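The 32x32x3 input shape matches CIFAR-10, so the toy ResNet can be smoke-tested end to end. The sketch below trains it for a single short run; the optimizer, loss, and epoch count are illustrative choices, not tuned settings.

(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()

x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    # `from_logits=True` because the final Dense(10) has no softmax
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['acc'],
)
model.fit(x_train, y_train, batch_size=64, epochs=1, validation_split=0.2)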
class MLP(keras.Model):

    def __init__(self, **kwargs):
        super(MLP, self).__init__(**kwargs)
        # Two fully connected layers; the sizes here are illustrative
        self.dense_1 = layers.Dense(64, activation='relu')
        self.dense_2 = layers.Dense(10)

    def call(self, inputs):
        x = self.dense_1(inputs)
        return self.dense_2(x)
# Instantiate the model.
mlp = MLP()

# Necessary to create the model's state.
# The model doesn't have a state until it's called at least once.
_ = mlp(tf.zeros((1, 32)))
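For comparison, the same MLP can be written in the Functional style, where the state is created up front rather than on the first call. The name `functional_mlp` is chosen here to avoid clobbering the subclassed instance, and the layer sizes mirror the (assumed) sizes above.

inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
functional_mlp = keras.Model(inputs, outputs)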
# Dimensions for the toy sequence model below (illustrative values)
units = 32
timesteps = 10
input_dim = 5

# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
class CustomRNN(layers.Layer):
    def __init__(self):
        super(CustomRNN, self).__init__()
        self.units = units
        self.projection_1 = layers.Dense(units=units, activation='tanh')
        self.projection_2 = layers.Dense(units=units, activation='tanh')
        # Our previously-defined Functional model
        self.classifier = model
    def call(self, inputs):
        outputs = []
        state = tf.zeros(shape=(inputs.shape[0], self.units))
        for t in range(inputs.shape[1]):
            x = inputs[:, t, :]
            h = self.projection_1(x)
            y = h + self.projection_2(state)
            state = y
            outputs.append(y)
        features = tf.stack(outputs, axis=1)
        print(features.shape)
        return self.classifier(features)
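A quick way to exercise this layer is to call it once on zeros, which also builds its weights. The batch size of 1 below is an arbitrary choice, since `inputs.shape[0]` is read at call time.

rnn_model = CustomRNN()
_ = rnn_model(tf.zeros((1, timesteps, input_dim)))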
# A variant of `CustomRNN` that uses a plain Dense layer as its classifier
# head instead of the Functional model defined above
class CustomRNN(layers.Layer):

    def __init__(self):
        super(CustomRNN, self).__init__()
        self.units = units
        self.projection_1 = layers.Dense(units=units, activation='tanh')
        self.projection_2 = layers.Dense(units=units, activation='tanh')
        self.classifier = layers.Dense(1)

    def call(self, inputs):
        outputs = []
        state = tf.zeros(shape=(inputs.shape[0], self.units))
        for t in range(inputs.shape[1]):
            x = inputs[:, t, :]
            h = self.projection_1(x)
            y = h + self.projection_2(state)
            state = y
            outputs.append(y)
        features = tf.stack(outputs, axis=1)
        return self.classifier(features)
batch_size = 16

# Note that we specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when we create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
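To finish the example, the symbolic graph can be wrapped in a `Model`; the summary call is a simple sanity check that the fixed batch dimension threads through the custom layer.

model = keras.Model(inputs, outputs)
model.summary()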