
Keras Custom Optimizer: legacy.interfaces

I want to create a custom optimizer in Keras. To that end I re-implemented SGD as my own class (training an MLP for binary classification) and named the optimizer instance myopt. The following is the code:

from __future__ import absolute_import 
import tensorflow as tf 
import six 
import copy 
from six.moves import zip 
from keras.utils.generic_utils import serialize_keras_object 
from keras.utils.generic_utils import deserialize_keras_object 
from keras.legacy import interfaces 
from keras import backend as K 
import numpy as np 
from keras.models import Sequential 
from keras.layers import Dense, Dropout 
# Generate dummy data 
x_train = np.random.random((1000, 20)) 
y_train = np.random.randint(2, size=(1000, 1)) 
x_test = np.random.random((100, 20)) 
y_test = np.random.randint(2, size=(100, 1)) 

model = Sequential() 
model.add(Dense(64, input_dim=20, activation='relu')) 
model.add(Dropout(0.5)) 
model.add(Dense(64, activation='relu')) 
model.add(Dropout(0.5)) 
model.add(Dense(1, activation='sigmoid')) 

def clip_norm(g, c, n):
    """Clip the gradient g when the global L2 norm n exceeds c."""
    if c <= 0:  # if clipnorm == 0 there is no need to add ops to the graph
        return g

    # TF requires a special op to multiply IndexedSlices by a scalar
    if K.backend() == 'tensorflow':
        condition = n >= c
        then_expression = tf.scalar_mul(c / n, g)
        else_expression = g

        # save the shape to avoid converting a sparse tensor to dense
        if isinstance(then_expression, tf.Tensor):
            g_shape = copy.copy(then_expression.get_shape())
        elif isinstance(then_expression, tf.IndexedSlices):
            g_shape = copy.copy(then_expression.dense_shape)
        if condition.dtype != tf.bool:
            condition = tf.cast(condition, 'bool')
        g = tf.cond(condition,
                    lambda: then_expression,
                    lambda: else_expression)
        if isinstance(then_expression, tf.Tensor):
            g.set_shape(g_shape)
        elif isinstance(then_expression, tf.IndexedSlices):
            g._dense_shape = g_shape
    else:
        g = K.switch(K.greater_equal(n, c), g * c / n, g)
    return g


class Optimizer(object):
    """Abstract optimizer base class.
    Note: this is the parent class of all optimizers, not an actual optimizer
    that can be used for training models.
    All Keras optimizers support the following keyword arguments:
        clipnorm: float >= 0. Gradients will be clipped
            when their L2 norm exceeds this value.
        clipvalue: float >= 0. Gradients will be clipped
            when their absolute value exceeds this value.
    """

    def __init__(self, **kwargs):
        allowed_kwargs = {'clipnorm', 'clipvalue'}
        for k in kwargs:
            if k not in allowed_kwargs:
                raise TypeError('Unexpected keyword argument '
                                'passed to optimizer: ' + str(k))
        self.__dict__.update(kwargs)
        self.updates = []
        self.weights = []

    @interfaces.legacy_get_updates_support
    def get_updates(self, loss, params):
        raise NotImplementedError

    def get_gradients(self, loss, params):
        grads = K.gradients(loss, params)
        if hasattr(self, 'clipnorm') and self.clipnorm > 0:
            norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
            grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
        if hasattr(self, 'clipvalue') and self.clipvalue > 0:
            grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
        return grads

    def set_weights(self, weights):
        """Sets the weights of the optimizer, from Numpy arrays.
        Should only be called after computing the gradients
        (otherwise the optimizer has no weights).
        # Arguments
            weights: a list of Numpy arrays. The number
                of arrays and their shape must match
                number of the dimensions of the weights
                of the optimizer (i.e. it should match the
                output of `get_weights`).
        # Raises
            ValueError: in case of incompatible weight shapes.
        """
        params = self.weights
        weight_value_tuples = []
        param_values = K.batch_get_value(params)
        for pv, p, w in zip(param_values, params, weights):
            if pv.shape != w.shape:
                raise ValueError('Optimizer weight shape ' +
                                 str(pv.shape) +
                                 ' not compatible with '
                                 'provided weight shape ' + str(w.shape))
            weight_value_tuples.append((p, w))
        K.batch_set_value(weight_value_tuples)

    def get_weights(self):
        """Returns the current value of the weights of the optimizer.
        # Returns
            A list of numpy arrays.
        """
        return K.batch_get_value(self.weights)

    def get_config(self):
        config = {}
        if hasattr(self, 'clipnorm'):
            config['clipnorm'] = self.clipnorm
        if hasattr(self, 'clipvalue'):
            config['clipvalue'] = self.clipvalue
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)

class testsgd(Optimizer):
    """Stochastic gradient descent optimizer.
    Includes support for momentum,
    learning rate decay, and Nesterov momentum.
    # Arguments
        lr: float >= 0. Learning rate.
        momentum: float >= 0. Parameter updates momentum.
        decay: float >= 0. Learning rate decay over each update.
        nesterov: boolean. Whether to apply Nesterov momentum.
    """

    def __init__(self, lr=0.01, momentum=0., decay=0.,
                 nesterov=False, **kwargs):
        super(testsgd, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.momentum = K.variable(momentum, name='momentum')
            self.decay = K.variable(decay, name='decay')
        self.initial_decay = decay
        self.nesterov = nesterov

    @interfaces.legacy_get_updates_support
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                                  K.dtype(self.decay))))
        # momentum
        shapes = [K.int_shape(p) for p in params]
        moments = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + moments
        for p, g, m in zip(params, grads, moments):
            v = self.momentum * m - lr * g  # velocity
            self.updates.append(K.update(m, v))

            if self.nesterov:
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'momentum': float(K.get_value(self.momentum)),
                  'decay': float(K.get_value(self.decay)),
                  'nesterov': self.nesterov}
        base_config = super(testsgd, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


myopt = testsgd() 
model.compile(loss='binary_crossentropy',
              optimizer=myopt,
              metrics=['accuracy'])

model.fit(x_train, y_train,
          epochs=20,
          batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)

I get the following error at the @interfaces.legacy_get_updates_support line:

AttributeError: module 'keras.legacy.interfaces' has no attribute 'legacy_get_updates_support'

How do I fix it?

Which version of Keras are you using? –

I am using Keras version 2.0.6 – Hitesh

Answers

You need to upgrade Keras, because version 2.0.7 is the one that added the legacy_get_updates_support function.
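
As a quick sanity check, you can confirm the installed version from Python before and after upgrading (the upgrade itself is typically done with pip install --upgrade keras):

import keras
# legacy_get_updates_support was added in Keras 2.0.7, so any
# older version raises the AttributeError from the question.
print(keras.__version__)  # expect '2.0.7' or later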

I updated Keras (to 2.0.8). Now I get the following error: ValueError: ('Could not interpret optimizer identifier:', <__main__.testsgd object at 0x7f0111606ef0>) – Hitesh

@Hitesh Your optimizer class should inherit from keras.optimizers.Optimizer. I am not sure why you copied the Optimizer class from Keras into your own source code, but that is the problem. –
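
For context on why the ValueError appears: when compiling, Keras resolves the optimizer roughly as in the sketch below (a paraphrase of the relevant check in keras/optimizers.py, not the verbatim source). An instance of a locally copied Optimizer class is a different type from Keras's own base class, so it fails the isinstance check:

from keras.optimizers import Optimizer

def get(identifier):
    # Keras accepts a ready-made optimizer object only if it is an
    # instance of keras.optimizers.Optimizer; a look-alike class copied
    # into your own script is a different type and is rejected.
    if isinstance(identifier, Optimizer):
        return identifier
    raise ValueError('Could not interpret optimizer identifier:', identifier)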

Actually I want to optimize a CNN with my own optimization method, which I am researching, so I am experimenting with how to write an optimizer class. Either way, should my optimizer code live in my own source file, or do I have to add it to Keras's optimizers as described at https://keras.io/optimizers/? – Hitesh

Instead of copy-pasting Keras's optimizers.py, you can define your own class derived from the Optimizer base class and put it in a separate .py file. For example, below is a plain SGD without momentum or Nesterov. Put it in a file named sgd_cust.py.

from keras.optimizers import Optimizer
from keras.legacy import interfaces
from keras import backend as K

class SGDCust(Optimizer):
    """Stochastic gradient descent optimizer.

    # Arguments
        lr: float >= 0. Learning rate.
    """

    def __init__(self, lr=0.01, **kwargs):
        super(SGDCust, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')

    @interfaces.legacy_get_updates_support
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr

        shapes = [K.int_shape(p) for p in params]
        delta_ws = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + delta_ws
        for p, g, delta_wi in zip(params, grads, delta_ws):
            delta_w = -lr * g  # plain gradient step (no momentum)
            self.updates.append(K.update(delta_wi, delta_w))

            new_p = p + delta_w

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr))}
        base_config = super(SGDCust, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

Now you can use your custom optimizer as follows:

from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from sgd_cust import SGDCust

batch_size = 10
num_classes = 10
epochs = 5

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, num_classes).astype('float32')
y_test = keras.utils.to_categorical(y_test, num_classes).astype('float32')


model = Sequential()
rnd_normal_init = keras.initializers.glorot_normal()
model.add(Dense(30, activation='relu',
                kernel_initializer=rnd_normal_init,
                bias_initializer=rnd_normal_init, input_shape=(784,)))
model.add(Dense(num_classes, activation='softmax',
                kernel_initializer=rnd_normal_init,
                bias_initializer=rnd_normal_init))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=SGDCust(lr=0.1),
              metrics=['categorical_accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=2,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
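
One closing note: because SGDCust implements get_config, a model compiled with it can be saved and reloaded, but load_model must be told about the custom class via the custom_objects argument. A minimal sketch, assuming the model above and an illustrative file name:

from keras.models import load_model
from sgd_cust import SGDCust

model.save('mnist_sgd_cust.h5')  # illustrative file name
restored_model = load_model('mnist_sgd_cust.h5',
                            custom_objects={'SGDCust': SGDCust})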