An error occurs when running an AutoML model

simple_bind error. Arguments: Input_0: (16, 3, 480, 640)
[06:48:55] src/operator/./cudnn_convolution-inl.h:556: Check failed: e == CUDNN_STATUS_SUCCESS (9 vs. 0) cuDNN: CUDNN_STATUS_NOT_SUPPORTED
Stack trace returned 10 entries:
[bt] (0) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x26ac5c) [0x7f32bc3c4c5c]
[bt] (1) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x360307d) [0x7f32bf75d07d]
[bt] (2) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x35f9add) [0x7f32bf753add]
[bt] (3) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x235e5b4) [0x7f32be4b85b4]
[bt] (4) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x22d5668) [0x7f32be42f668]
[bt] (5) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x338a25) [0x7f32bc492a25]
[bt] (6) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x20b3bd2) [0x7f32be20dbd2]
[bt] (7) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x20d2a1b) [0x7f32be22ca1b]
[bt] (8) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x20d6b06) [0x7f32be230b06]
[bt] (9) /usr/local/lib/python3.5/dist-packages/mxnet/libmxnet.so(+0x20d7174) [0x7f32be231174]

We have also tried a pretrained model, and it has the same problem.
This is a test with a soda bottle dataset.

Could you share your model file privately (you can email it to us via support)?

It is an AutoML model.

import keras
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras.layers.core import Activation
from keras.layers.core import Dropout
from keras.layers import Input
from keras.models import Model
from keras.regularizers import *

def get_model():
    aliases = {}
    Input_0 = Input(shape=(3, 480, 640), name='Input_0')
    convolution2d_391 = Convolution2D(name='convolution2d_391', activation='relu', nb_row=2, dim_ordering='th', nb_filter=500, border_mode='same', nb_col=2)(Input_0)
    convolution2d_392 = Convolution2D(name='convolution2d_392', activation='linear', nb_row=2, dim_ordering='th', nb_filter=500, border_mode='same', nb_col=2)(convolution2d_391)
    maxpooling2d_73 = MaxPooling2D(name='maxpooling2d_73', dim_ordering='th', strides=(2, 2))(convolution2d_392)
    convolution2d_393 = Convolution2D(name='convolution2d_393', activation='relu', nb_row=2, dim_ordering='th', nb_filter=13, border_mode='same', nb_col=2)(maxpooling2d_73)
    convolution2d_394 = Convolution2D(name='convolution2d_394', activation='linear', nb_row=2, dim_ordering='th', nb_filter=13, border_mode='same', nb_col=2)(convolution2d_393)
    batchnormalization_222 = BatchNormalization(name='batchnormalization_222')(convolution2d_394)
    maxpooling2d_74 = MaxPooling2D(name='maxpooling2d_74', dim_ordering='th', strides=(2, 2))(batchnormalization_222)
    convolution2d_395 = Convolution2D(name='convolution2d_395', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(maxpooling2d_74)
    convolution2d_396 = Convolution2D(name='convolution2d_396', activation='linear', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_395)
    convolution2d_397 = Convolution2D(name='convolution2d_397', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_396)
    convolution2d_398 = Convolution2D(name='convolution2d_398', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_397)
    maxpooling2d_75 = MaxPooling2D(name='maxpooling2d_75', dim_ordering='th', strides=(2, 2))(convolution2d_398)
    convolution2d_399 = Convolution2D(name='convolution2d_399', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(maxpooling2d_75)
    convolution2d_400 = Convolution2D(name='convolution2d_400', activation='linear', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_399)
    batchnormalization_223 = BatchNormalization(name='batchnormalization_223')(convolution2d_400)
    convolution2d_401 = Convolution2D(name='convolution2d_401', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(batchnormalization_223)
    convolution2d_402 = Convolution2D(name='convolution2d_402', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_401)
    maxpooling2d_76 = MaxPooling2D(name='maxpooling2d_76', dim_ordering='th', strides=(2, 2))(convolution2d_402)
    convolution2d_403 = Convolution2D(name='convolution2d_403', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(maxpooling2d_76)
    convolution2d_404 = Convolution2D(name='convolution2d_404', activation='linear', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_403)
    convolution2d_405 = Convolution2D(name='convolution2d_405', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_404)
    convolution2d_406 = Convolution2D(name='convolution2d_406', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_405)
    maxpooling2d_77 = MaxPooling2D(name='maxpooling2d_77', dim_ordering='th', strides=(2, 2))(convolution2d_406)
    convolution2d_407 = Convolution2D(name='convolution2d_407', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(maxpooling2d_77)
    convolution2d_408 = Convolution2D(name='convolution2d_408', activation='linear', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_407)
    batchnormalization_224 = BatchNormalization(name='batchnormalization_224')(convolution2d_408)
    convolution2d_409 = Convolution2D(name='convolution2d_409', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(batchnormalization_224)
    convolution2d_410 = Convolution2D(name='convolution2d_410', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_409)
    maxpooling2d_78 = MaxPooling2D(name='maxpooling2d_78', dim_ordering='th', strides=(2, 2))(convolution2d_410)
    convolution2d_411 = Convolution2D(name='convolution2d_411', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(maxpooling2d_78)
    convolution2d_412 = Convolution2D(name='convolution2d_412', activation='linear', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_411)
    convolution2d_413 = Convolution2D(name='convolution2d_413', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_412)
    convolution2d_414 = Convolution2D(name='convolution2d_414', activation='relu', nb_row=2, dim_ordering='th', nb_filter=32, border_mode='same', nb_col=2)(convolution2d_413)
    maxpooling2d_79 = MaxPooling2D(name='maxpooling2d_79', dim_ordering='th', strides=(2, 2))(convolution2d_414)
    flatten = Flatten(name='flatten')(maxpooling2d_79)
    dense_46 = Dense(name='dense_46', output_dim=2048, activation='linear')(flatten)
    activation_143 = Activation(name='activation_143', activation='relu')(dense_46)
    dense_47 = Dense(name='dense_47', output_dim=1024, activation='linear')(activation_143)
    batchnormalization_225 = BatchNormalization(name='batchnormalization_225')(dense_47)
    activation_144 = Activation(name='activation_144', activation='relu')(batchnormalization_225)
    dropout_16 = Dropout(name='dropout_16', p=0.4)(activation_144)
    dense_48 = Dense(name='dense_48', output_dim=1024, activation='linear')(dropout_16)
    activation_145 = Activation(name='activation_145', activation='relu')(dense_48)
    dense_49 = Dense(name='dense_49', output_dim=8, activation='softmax')(activation_145)

    model = Model([Input_0], [dense_49])
    return aliases, model

from keras.optimizers import *

def get_optimizer():
    return Adadelta()

def is_custom_loss_function():
    return False

def get_loss_function():
    return 'categorical_crossentropy'

def get_batch_size():
    return 32

def get_num_epoch():
    return 10

def get_data_config():
    return '{"shuffle": false, "mapping": {"Image": {"type": "Image", "shape": "", "options": {"Normalization": false, "Resize": false, "width_shift_range": 0, "Height": 28, "rotation_range": 0, "vertical_flip": false, "pretrained": "None", "Width": 28, "horizontal_flip": false, "height_shift_range": 0, "shear_range": 0, "Augmentation": false, "Scaling": 1}, "port": "InputPort0"}, "Label": {"type": "Categorical", "shape": "", "options": {}, "port": "OutputPort0"}}, "numPorts": 1, "samples": {"split": 4, "training": 5280, "validation": 660, "test": 660}, "kfold": 1, "datasetLoadOption": "batch", "dataset": {"type": "private", "name": "soda_bottle-6600", "samples": 6600}}'

This error could be the result of a network that is too big to fit on the GPU/system. Can you try reducing the batch size and/or the image size?
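For a sense of scale, here is a rough back-of-the-envelope check (a minimal sketch, assuming float32 activations in NCHW layout; the halved batch and image sizes below are illustrative values only, not requirements) of why the first 500-filter convolution alone is so demanding at 480x640 with the batch of 16 shown in the error:

# Rough activation-memory arithmetic for just the first convolution layer.
# 'same' padding keeps the 480x640 spatial size, so its output tensor holds
# batch x filters x height x width float32 values.

def conv_activation_bytes(batch, filters, height, width, bytes_per_elem=4):
    # Size of one layer's output activation tensor in NCHW layout.
    return batch * filters * height * width * bytes_per_elem

# Failing run from the error message: (16, 3, 480, 640) input, 500 filters.
print(conv_activation_bytes(16, 500, 480, 640) / 1e9)  # ~9.8 GB for one activation

# Illustrative reduction: batch of 8 and a 240x320 input.
print(conv_activation_bytes(8, 500, 240, 320) / 1e9)   # ~1.2 GB

The backward pass, the other layers, and cuDNN's own convolution workspace all add to this, so halving the image roughly quarters the memory and halving the batch halves it again.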

Dear rajendra,
Thank you very much.
I will try it.