#Loading the IMDB dataset
from keras.datasets import imdb

(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

#Decoding reviews back to English
word_index = imdb.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
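
#Note (added illustration, not in the original paste): indices are offset by 3
#because 0, 1 and 2 are reserved for "padding", "start of sequence" and "unknown"
#in the Keras IMDB data. Printing the first review and its label shows the result.
print(decoded_review)
print('Label of the first review (1 = positive, 0 = negative):', train_labels[0])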

#Encoding the integer sequences into a binary matrix
import numpy as np

def vectorize_sequences(sequences, dimension=10000):
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.
    return results

x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
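
#Sanity check (added sketch): each review becomes a 10000-dimensional multi-hot
#vector with 1.0 at the index of every word that occurs in it.
example = vectorize_sequences([[3, 5, 5]])
assert example.shape == (1, 10000)
assert example[0, 3] == 1. and example[0, 5] == 1.
assert example.sum() == 2.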

#Vectorizing labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')

#The model definition
from keras import models
from keras import layers
from keras import regularizers

model = models.Sequential()
model.add(layers.Dense(4, kernel_regularizer=regularizers.l2(0.001), activation='relu', input_shape=(10000,)))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(4, kernel_regularizer=regularizers.l2(0.001), activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
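
#Added check: print the layer stack and parameter counts before training.
model.summary()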

#Setting aside a validation set
x_val = x_train[:10000]
partial_x_train = x_train[10000:]

y_val = y_train[:10000]
partial_y_train = y_train[10000:]

#Training the model on the remaining data, monitoring the validation set
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['acc'])

history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
'''
#Plotting the training and validation loss
import matplotlib.pyplot as plt

history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']

epochs = range(1, 20 + 1)

plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()

#Plotting the training and validation accuracy
plt.clf()
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']

plt.plot(epochs, acc_values, 'bo', label='Training acc')
plt.plot(epochs, val_acc_values, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.show()
'''
print('Accuracy:', model.evaluate(x_test, y_test)[1])
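
#Added illustration: predicted probabilities (sigmoid outputs) for a few test reviews.
print(model.predict(x_test[:3]))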