[scikit-learn] (no subject)

Carlton Banks noflaco at gmail.com
Fri Mar 17 23:36:02 EDT 2017


I am struggling to get good results from my CNN, so I decided to optimize its hyperparameters with a grid search. I am currently trying to use scikit-learn's GridSearchCV.

    import numpy as np
    from keras.models import Sequential
    from keras.layers import (Dense, Flatten, Convolution2D,
                              MaxPooling2D, ZeroPadding2D)
    from keras.wrappers.scikit_learn import KerasRegressor
    from sklearn.model_selection import GridSearchCV

    def create_model(init_mode='uniform', activation_mode='linear',
                     optimizer_mode='adam', activation_mode_conv='linear'):
            model = Sequential()

            # Zero-pad the small input so the stacked 3x3 convolutions
            # have enough spatial extent to slide over.
            model.add(ZeroPadding2D((6, 4), input_shape=(6, 3, 3)))
            model.add(Convolution2D(32, 3, 3, activation=activation_mode_conv))
            print model.output_shape
            model.add(Convolution2D(32, 3, 3, activation=activation_mode_conv))
            print model.output_shape
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 1)))
            print model.output_shape
            model.add(Convolution2D(64, 3, 3, activation=activation_mode_conv))
            print model.output_shape
            model.add(Convolution2D(64, 3, 3, activation=activation_mode_conv))
            print model.output_shape
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 1)))
            model.add(Flatten())
            print model.output_shape
            # input_dim is only meaningful on the first layer of a
            # Sequential model; every later layer infers its input size
            # from the previous layer's output, so it is dropped here.
            model.add(Dense(output_dim=32, init=init_mode, activation=activation_mode))
            model.add(Dense(output_dim=13, init=init_mode, activation=activation_mode))
            model.add(Dense(output_dim=1, init=init_mode, activation=activation_mode))
            model.add(Dense(output_dim=1, init=init_mode, activation=activation_mode))
            #print model.summary()
            model.compile(loss='mean_squared_error', optimizer=optimizer_mode)

            return model
            #reduce_lr=ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=3, verbose=1, mode='auto', epsilon=0.1, cooldown=0, min_lr=0.000000000000000001)
            #stop  = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')
    
            #log=csv_logger = CSVLogger('training_'+str(i)+'.csv')
            #print "Model Train"
    
            #hist_current = model.fit(np.array(data_train_input),
            #                    np.array(data_train_output),
            #                    shuffle=False,
            #                    validation_data=(np.array(data_test_input),np.array(data_test_output)),
            #                    validation_split=0.1,
            #                    nb_epoch=150000,
            #                    verbose=1,
            #                    callbacks=[reduce_lr,log,stop])
    
            #print()
            #print model.summary()
            #print "Model stored"
            #model.save(spectogram_path+"Model"+str(feature)+".h5")
            #model.save_weights(spectogram_path+"Model"+str(feature)+"_weights.h5")
            #del model
    
    
    
    ## Make it work for other feature ranges
    ## Add the CNN part and test it
    ## Try with gabor kernels as suggested by the other paper..
    
    # model() here is a separate data-preparation helper defined earlier
    # in my script, not the Keras model. x[:-(len(x)-1000)] is just the
    # first 1000 samples, written more clearly as x[:1000].
    input_train, input_test, output_train, output_test = model(
        0,
        train_input_data_interweawed_normalized[:1000],
        output_data_train[:1000],
        test_input_data_interweawed_normalized[:1000],
        output_data_test[:1000])
    
    del test_input_data
    del test_name
    del test_input_data_normalized
    del test_name_normalized
    del test_input_data_interweawed
    del test_name_interweawed
    del test_input_data_interweawed_normalized
    del test_name_interweawed_normalized
    
    del train_input_data
    del train_name
    del train_input_data_normalized
    del train_name_normalized
    del train_input_data_interweawed
    del train_name_interweawed
    del train_input_data_interweawed_normalized
    del train_name_interweawed_normalized
    
    
    seed = 7
    np.random.seed(seed)
    print "Regressor"
    # Note: this rebinds the name `model`, shadowing the data helper above.
    model = KerasRegressor(build_fn=create_model, verbose=10)
    init_mode_list = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform']
    activation_mode_list = ['softmax', 'softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']
    activation_mode_list_conv = ['softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']
    optimizer_mode_list = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
    batch_size_list = [10, 20, 40, 60, 80, 100]
    epochs = [10, 50, 100]
    param_grid = dict(init_mode=init_mode_list,
                      batch_size=batch_size_list,
                      nb_epoch=epochs,
                      activation_mode=activation_mode_list,
                      optimizer_mode=optimizer_mode_list,
                      activation_mode_conv=activation_mode_list_conv)
    grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
    print "Grid fit"
    grid_result = grid.fit(np.asarray(input_train), np.asarray(output_train))
    
    # summarize results
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))

This runs, but the problem is that it only reports results at the very end of the whole search. I ran the code once, but it crashed with this error message:

    cannot allocate memory for thread-local data: ABORT

I am not sure what could be causing this problem.
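
In case it helps, here is what I was planning to try next. These are untested sketches on my side, and the verbosity level and n_iter value are just guesses: setting verbose on GridSearchCV should print progress while the search runs instead of only at the end, and RandomizedSearchCV would fit only a sample of the grid, which should also cut down how many models get built back to back:

    from sklearn.model_selection import RandomizedSearchCV

    # verbose > 0 makes scikit-learn print a line per fit, so progress
    # is visible during the search rather than only at the end.
    grid = GridSearchCV(estimator=model, param_grid=param_grid,
                        n_jobs=1, verbose=2)

    # Sample 100 random parameter settings instead of exhausting all
    # 56448 combinations; far fewer consecutive fits should also ease
    # the memory pressure.
    search = RandomizedSearchCV(estimator=model, param_distributions=param_grid,
                                n_iter=100, n_jobs=1, verbose=2,
                                random_state=seed)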

