AttributeError: 'Node' object has no attribute 'output_masks'
ranjeetthakur opened this issue · comments
I'm getting the above-mentioned error on this line of the code:
slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx':i,'parts':gpu_count})(x)
Here is my model:
def build_model(shape, params):
    """Assemble and compile the attention-based audio classifier.

    ``shape`` is ``(input_dim, hidden_units, num_classes)``; ``params``
    supplies ``kernel_size``, ``dropout``, ``L2`` and ``lr``.
    """
    input_dim, hidden_units, num_classes = shape

    audio_sequence = Input(shape=(None, input_dim), dtype='float32', name='audio')

    # Build the sub-modules once, then wire them together below.
    encode = _Encoding(input_dim, hidden_units, hidden_units,
                       params['kernel_size'], params['dropout'], params['L2'])
    hide = _Hidden(2 * hidden_units, params['dropout'], params['L2'])
    attend = _Attention(2 * hidden_units)
    classify = _Classifier(2 * hidden_units, num_classes,
                           params['dropout'], params['L2'])

    encoding = encode(audio_sequence)
    attention_weight = attend(hide(encoding))
    scores = classify(_align(encoding, attention_weight))

    model = Model(inputs=[audio_sequence], outputs=[scores])
    model.compile(optimizer=Adam(lr=params['lr']),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
class _Encoding(object):
    """Conv1D front-end followed by a bidirectional LSTM encoder.

    Returns a full sequence (``return_sequences=True``), so the output
    has shape ``(batch, time, 2 * hidden_units)``.
    """

    def __init__(self, input_dim, num_filters, hidden_units, kernel_size,
                 dropout=0.0, L2=10**-5):
        conv = Conv1D(num_filters,
                      kernel_size,
                      padding='same',
                      activation='relu',
                      kernel_regularizer=l2(L2),
                      input_shape=(None, input_dim))
        recurrent = Bidirectional(LSTM(hidden_units,
                                       dropout=dropout,
                                       recurrent_dropout=dropout,
                                       return_sequences=True,
                                       unroll=False))
        self.model = Sequential()
        self.model.add(conv)
        self.model.add(recurrent)

    def __call__(self, sequence):
        """Encode a ``(batch, time, input_dim)`` sequence tensor."""
        return self.model(sequence)
class _Hidden(object):
    """Per-timestep tanh projection, applied via ``TimeDistributed``."""

    def __init__(self, hidden_units, dropout=0.0, L2=10**-5):
        projection = Sequential()
        projection.add(Dropout(dropout, input_shape=(hidden_units,)))
        projection.add(Dense(hidden_units,
                             activation='tanh',
                             name='attend',
                             kernel_initializer='he_normal',
                             kernel_regularizer=l2(L2)))
        # Share the same projection weights across every timestep.
        self.model = TimeDistributed(projection)

    def __call__(self, encoding):
        return self.model(encoding)
class _Attention(object):
    """Scores each timestep with one linear unit, then softmaxes over time."""

    def __init__(self, input_dim):
        scorer = Sequential()
        scorer.add(Dense(1,
                         activation=None,
                         use_bias=False,
                         kernel_initializer='he_normal',
                         input_shape=(input_dim,)))
        self.model = TimeDistributed(scorer)
        # Collapse the (time, 1) scores and normalize across timesteps.
        self.output = Sequential()
        self.output.add(Flatten())
        self.output.add(Activation('softmax'))

    def __call__(self, encoding):
        scores = self.model(encoding)
        return self.output(scores)
def _align(encoding, attention_weight):
    """Attention-weighted sum of the encoding over the time axis."""
    return Dot(axes=(1, 1), normalize=False)([encoding, attention_weight])
class _Classifier(object):
    """Classification head: dropout -> ReLU hidden layer -> softmax output."""

    def __init__(self, hidden_units, output_dim, dropout=0.0, L2=10**-5):
        head = Sequential()
        head.add(Dropout(dropout, input_shape=(hidden_units,)))
        head.add(Dense(hidden_units,
                       activation='relu',
                       name='classifier_hidden',
                       kernel_initializer='he_normal',
                       kernel_regularizer=l2(L2)))
        head.add(Dense(output_dim,
                       activation='softmax',
                       name='scores',
                       kernel_initializer='he_normal',
                       kernel_regularizer=l2(L2)))
        self.model = head

    def __call__(self, align):
        return self.model(align)
def test_build():
    """Smoke-test: build a model with a small fixed configuration."""
    shape = (26, 100, 5)  # (input_dim, hidden_units, num_classes)
    params = {
        'dropout': 0.0,
        'L2': 10**-5,
        'kernel_size': 5,
        'lr': 0.001,
    }
    return build_model(shape, params)
# Build the model immediately (module-level smoke test).
model = test_build()
Can someone please help me?