In [1]:
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Download data
dataset_path = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
# Load data with pandas
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'Model Year', 'Origin']
dataset = pd.read_csv(dataset_path, names=column_names, na_values="?", comment='\t', sep=" ", skipinitialspace=True)
# Remove lines with unknown values
dataset = dataset.dropna()
# Origin column is categorical, make it numeric
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1) * 1.0
dataset['Europe'] = (origin == 2) * 1.0
dataset['Japan'] = (origin == 3) * 1.0
# Split data into train and test sets
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# Separate the target value from features
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
# Normalize
train_stats = train_dataset.describe().transpose()
def norm(x):
    return (x - train_stats['mean']) / train_stats['std']
train_data = norm(train_dataset)
test_data = norm(test_dataset)
# Create model
model = keras.Sequential([
    layers.Dense(64, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
    layers.Dense(64, activation=tf.nn.relu),
    layers.Dense(1)
])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.RMSprop(0.001), metrics=['mean_absolute_error', 'mean_squared_error'])
# Stop training when the validation score doesn't improve
# The patience parameter is the number of epochs to wait for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# Start learning
model.fit(train_data, train_labels, epochs=1000, validation_split=0.2, callbacks=[early_stop], verbose=False)
import matplotlib.pyplot as plt
loss, mae, mse = model.evaluate(test_data, test_labels, verbose=0)
test_predictions = model.predict(test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.title("Mean Abs Error: %.2f MPG" % (mae, ))
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0, plt.xlim()[1]])
plt.ylim([0, plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
plt.show()
Downloading data from https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data
30286/30286 [==============================] - 0s 1us/step
3/3 [==============================] - 0s 12ms/step
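As a side note (not part of the original cell): model.fit returns a History object whose history dict holds the per-epoch metrics, which makes it easy to see where early stopping kicked in. A minimal sketch, reusing the variables from the cell above; note that calling fit again continues training the already-fitted model, and the metric key names can vary between TensorFlow versions:

# Sketch: capture the History object and plot training vs. validation error.
history = model.fit(train_data, train_labels, epochs=1000, validation_split=0.2, callbacks=[early_stop], verbose=False)
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.plot(hist['epoch'], hist['mean_absolute_error'], label='Train MAE')
plt.plot(hist['epoch'], hist['val_mean_absolute_error'], label='Validation MAE')
plt.xlabel('Epoch')
plt.ylabel('MAE [MPG]')
plt.legend()
plt.show()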
In [2]:
import tensorflow as tf
mnist = tf.keras.datasets.mnist
# Load MNIST data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Create model
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Start learning
model.fit(x_train, y_train, epochs=5)
# Print the loss value and accuracy
print(model.evaluate(x_test, y_test))
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 [==============================] - 1s 0us/step
2023-03-16 11:26:53.620433: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 188160000 exceeds 10% of free system memory.
Epoch 1/5
1875/1875 [==============================] - 99s 51ms/step - loss: 0.2186 - accuracy: 0.9346
Epoch 2/5
1875/1875 [==============================] - 98s 52ms/step - loss: 0.0973 - accuracy: 0.9706
Epoch 3/5
1875/1875 [==============================] - 99s 53ms/step - loss: 0.0681 - accuracy: 0.9789
Epoch 4/5
1875/1875 [==============================] - 95s 51ms/step - loss: 0.0549 - accuracy: 0.9825
Epoch 5/5
1875/1875 [==============================] - 98s 52ms/step - loss: 0.0426 - accuracy: 0.9861
2023-03-16 11:35:04.030336: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 31360000 exceeds 10% of free system memory.
313/313 [==============================] - 7s 20ms/step - loss: 0.0638 - accuracy: 0.9801
[0.06377729028463364, 0.9800999760627747]
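To sanity-check the trained network on a single example, the softmax output can be read off directly. A minimal sketch (an addition, not part of the original post), reusing the variables from the cell above:

import numpy as np
# Sketch: predict() returns one row of 10 class probabilities per input image.
probs = model.predict(x_test[:1])
print("Predicted digit:", np.argmax(probs[0]), "with probability", np.max(probs[0]))
print("True label:", y_test[0])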
In [3]:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import tkinter, sys
# Sometimes training takes longer with eager execution enabled
tf.compat.v1.disable_eager_execution()
# We don't have a terminal available in GUI mode, so let's simulate it
class stdredir(object):
    def __init__(self):
        self.root = tkinter.Tk()
        self.widget = tkinter.Text(self.root)
        self.widget.pack(fill='both', expand=True)
        self.text = ''

    def write(self, string):
        self.text += string.replace('\b', '')
        # Emulate carriage returns so Keras progress bars overwrite in place
        if '\r' in string:
            cutto = self.text.rindex('\r')
            sfrom = -1
            try:
                sfrom = self.text.rindex('\n', 0, cutto)
            except ValueError:
                pass
            self.text = self.text[:sfrom + 1] + self.text[cutto + 1:]

    def flush(self):
        self.widget.delete(1.0, tkinter.END)
        self.widget.insert(tkinter.END, self.text)
        self.widget.see(tkinter.END)
        self.widget.update()
#sys.stdout = stdredir()
# The actual example
# Load data
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Class names are not included in the dataset
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Normalize data
train_images = train_images / 255.0
test_images = test_images / 255.0
# Create model
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Start learning
model.fit(train_images, train_labels, epochs=5)
# Predict results
predictions = model.predict(test_images)
# Helper functions
def plot_image(i, predictions_array, true_label, img):
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100 * np.max(predictions_array), class_names[true_label]), color=color)
def plot_value_array(i, predictions_array, true_label):
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
# Close the text window
#sys.stdout.root.destroy()
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols
plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    plot_value_array(i, predictions, test_labels)
plt.show()
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
29515/29515 [==============================] - 0s 2us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26421880/26421880 [==============================] - 1s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
5148/5148 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4422102/4422102 [==============================] - 0s 0us/step
Train on 60000 samples
Epoch 1/5
2023-03-16 11:39:08.518418: W tensorflow/c/c_api.cc:300] Operation '{name:'training/Adam/beta_2/Assign' id:174 op device:{requested: '', assigned: ''} def:{{{node training/Adam/beta_2/Assign}} = AssignVariableOp[_has_manual_control_dependencies=true, dtype=DT_FLOAT, validate_shape=false](training/Adam/beta_2, training/Adam/beta_2/Initializer/initial_value)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
60000/60000 [==============================] - 31s 510us/sample - loss: 0.4956 - accuracy: 0.8245
Epoch 2/5
60000/60000 [==============================] - 39s 644us/sample - loss: 0.3764 - accuracy: 0.8648
Epoch 3/5
60000/60000 [==============================] - 35s 577us/sample - loss: 0.3380 - accuracy: 0.8779
Epoch 4/5
60000/60000 [==============================] - 38s 630us/sample - loss: 0.3130 - accuracy: 0.8849
Epoch 5/5
60000/60000 [==============================] - 39s 653us/sample - loss: 0.2943 - accuracy: 0.8919
/usr/local/lib/python3.11/site-packages/keras/engine/training_v1.py:2359: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.
  updates=self.state_updates,
2023-03-16 11:42:09.735463: W tensorflow/c/c_api.cc:300] Operation '{name:'dense_6/Softmax' id:52 op device:{requested: '', assigned: ''} def:{{{node dense_6/Softmax}} = Softmax[T=DT_FLOAT, _has_manual_control_dependencies=true](dense_6/BiasAdd)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
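The two helpers are also handy on their own. A minimal sketch (an addition, not from the original post) that plots a single test image next to its probability bars, reusing the predictions computed above:

# Sketch: one image plus its 10-way probability bar chart.
i = 0
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
plt.show()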
In [4]:
import tensorflow as tf
from tensorflow import keras
# Load data
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2
word_index["<UNUSED>"] = 3
# Decode review to text
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
    return ' '.join([reverse_word_index.get(i, '?') for i in text if i != 0])
# Pad all reviews to the same length
train_data = keras.preprocessing.sequence.pad_sequences(train_data, value=word_index["<PAD>"], padding='post', maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data, value=word_index["<PAD>"], padding='post', maxlen=256)
# Create model
vocab_size = 10000
model = keras.Sequential([
    keras.layers.Embedding(vocab_size, 16),
    keras.layers.GlobalAveragePooling1D(),
    keras.layers.Dense(16, activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# Split data into validation and train sets
x_val, y_val = train_data[:10000], train_labels[:10000]
partial_x_train, partial_y_train = train_data[10000:], train_labels[10000:]
# Start learning
model.fit(partial_x_train, partial_y_train, batch_size=512, epochs=30, validation_data=(x_val, y_val))
results = model.evaluate(test_data, test_labels)
print(results)
COUNT = 5
test_data_print, test_labels_print = test_data[:COUNT], test_labels[:COUNT]
predictions = model.predict(test_data_print)
for i in range(COUNT):
    print("Review: " + decode_review(test_data_print[i]))
    pred = predictions[i][0]
    value = test_labels_print[i]
    print("Prediction / Value: " + str(pred) + " / " + str(value))
    if (pred < 0.5) != (value < 0.5):
        print('\x1b[1;31mIncorrect prediction')
    else:
        print('\x1b[1;32mCorrect prediction')
    print('\x1b[0m')
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17464789/17464789 [==============================] - 1s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb_word_index.json
1641221/1641221 [==============================] - 0s 0us/step
Train on 15000 samples, validate on 10000 samples
2023-03-16 11:43:24.443262: W tensorflow/c/c_api.cc:300] Operation '{name:'training_2/Adam/dense_8/kernel/v/Assign' id:635 op device:{requested: '', assigned: ''} def:{{{node training_2/Adam/dense_8/kernel/v/Assign}} = AssignVariableOp[_has_manual_control_dependencies=true, dtype=DT_FLOAT, validate_shape=false](training_2/Adam/dense_8/kernel/v, training_2/Adam/dense_8/kernel/v/Initializer/zeros)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
Epoch 1/30
  512/15000 [>.............................] - ETA: 12s - loss: 0.6936 - acc: 0.4727
2023-03-16 11:43:25.005953: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 8388608 exceeds 10% of free system memory.
2023-03-16 11:43:25.079503: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 8388608 exceeds 10% of free system memory.
2023-03-16 11:43:25.146009: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 8388608 exceeds 10% of free system memory.
15000/15000 [==============================] - ETA: 0s - loss: 0.6924 - acc: 0.5231
/usr/local/lib/python3.11/site-packages/keras/engine/training_v1.py:2335: UserWarning: `Model.state_updates` will be removed in a future version. This property should not be used in TensorFlow 2.0, as `updates` are applied automatically.
  updates = self.state_updates
2023-03-16 11:43:27.499389: W tensorflow/c/c_api.cc:300] Operation '{name:'loss_1/mul' id:423 op device:{requested: '', assigned: ''} def:{{{node loss_1/mul}} = Mul[T=DT_FLOAT, _has_manual_control_dependencies=true](loss_1/mul/x, loss_1/dense_8_loss/value)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
15000/15000 [==============================] - 3s 223us/sample - loss: 0.6924 - acc: 0.5231 - val_loss: 0.6908 - val_acc: 0.7018
Epoch 2/30
15000/15000 [==============================] - 3s 169us/sample - loss: 0.6877 - acc: 0.7379 - val_loss: 0.6841 - val_acc: 0.7289
Epoch 3/30
15000/15000 [==============================] - 3s 187us/sample - loss: 0.6766 - acc: 0.7417 - val_loss: 0.6699 - val_acc: 0.7464
Epoch 4/30
15000/15000 [==============================] - 3s 189us/sample - loss: 0.6567 - acc: 0.7647 - val_loss: 0.6470 - val_acc: 0.7649
Epoch 5/30
15000/15000 [==============================] - 3s 188us/sample - loss: 0.6261 - acc: 0.7911 - val_loss: 0.6144 - val_acc: 0.7822
Epoch 6/30
15000/15000 [==============================] - 3s 201us/sample - loss: 0.5860 - acc: 0.8064 - val_loss: 0.5740 - val_acc: 0.7962
Epoch 7/30
15000/15000 [==============================] - 3s 201us/sample - loss: 0.5395 - acc: 0.8265 - val_loss: 0.5304 - val_acc: 0.8147
Epoch 8/30
15000/15000 [==============================] - 3s 180us/sample - loss: 0.4913 - acc: 0.8416 - val_loss: 0.4877 - val_acc: 0.8287
Epoch 9/30
15000/15000 [==============================] - 3s 183us/sample - loss: 0.4455 - acc: 0.8577 - val_loss: 0.4501 - val_acc: 0.8402
Epoch 10/30
15000/15000 [==============================] - 3s 189us/sample - loss: 0.4054 - acc: 0.8705 - val_loss: 0.4168 - val_acc: 0.8499
Epoch 11/30
15000/15000 [==============================] - 3s 197us/sample - loss: 0.3708 - acc: 0.8797 - val_loss: 0.3907 - val_acc: 0.8566
Epoch 12/30
15000/15000 [==============================] - 3s 182us/sample - loss: 0.3425 - acc: 0.8865 - val_loss: 0.3694 - val_acc: 0.8621
Epoch 13/30
15000/15000 [==============================] - 3s 222us/sample - loss: 0.3179 - acc: 0.8910 - val_loss: 0.3519 - val_acc: 0.8673
Epoch 14/30
15000/15000 [==============================] - 3s 232us/sample - loss: 0.2962 - acc: 0.8995 - val_loss: 0.3380 - val_acc: 0.8708
Epoch 15/30
15000/15000 [==============================] - 3s 217us/sample - loss: 0.2774 - acc: 0.9044 - val_loss: 0.3267 - val_acc: 0.8730
Epoch 16/30
15000/15000 [==============================] - 3s 184us/sample - loss: 0.2612 - acc: 0.9098 - val_loss: 0.3171 - val_acc: 0.8757
Epoch 17/30
15000/15000 [==============================] - 3s 207us/sample - loss: 0.2465 - acc: 0.9135 - val_loss: 0.3097 - val_acc: 0.8783
Epoch 18/30
15000/15000 [==============================] - 3s 218us/sample - loss: 0.2328 - acc: 0.9189 - val_loss: 0.3036 - val_acc: 0.8806
Epoch 19/30
15000/15000 [==============================] - 3s 201us/sample - loss: 0.2208 - acc: 0.9226 - val_loss: 0.2989 - val_acc: 0.8801
Epoch 20/30
15000/15000 [==============================] - 2s 162us/sample - loss: 0.2098 - acc: 0.9266 - val_loss: 0.2946 - val_acc: 0.8818
Epoch 21/30
15000/15000 [==============================] - 3s 168us/sample - loss: 0.1996 - acc: 0.9311 - val_loss: 0.2915 - val_acc: 0.8832
Epoch 22/30
15000/15000 [==============================] - 3s 178us/sample - loss: 0.1907 - acc: 0.9347 - val_loss: 0.2901 - val_acc: 0.8839
Epoch 23/30
15000/15000 [==============================] - 3s 184us/sample - loss: 0.1817 - acc: 0.9389 - val_loss: 0.2877 - val_acc: 0.8844
Epoch 24/30
15000/15000 [==============================] - 3s 221us/sample - loss: 0.1734 - acc: 0.9437 - val_loss: 0.2866 - val_acc: 0.8853
Epoch 25/30
15000/15000 [==============================] - 3s 203us/sample - loss: 0.1661 - acc: 0.9453 - val_loss: 0.2862 - val_acc: 0.8852
Epoch 26/30
15000/15000 [==============================] - 3s 185us/sample - loss: 0.1592 - acc: 0.9497 - val_loss: 0.2871 - val_acc: 0.8842
Epoch 27/30
15000/15000 [==============================] - 3s 192us/sample - loss: 0.1522 - acc: 0.9522 - val_loss: 0.2865 - val_acc: 0.8856
Epoch 28/30
15000/15000 [==============================] - 3s 179us/sample - loss: 0.1456 - acc: 0.9550 - val_loss: 0.2870 - val_acc: 0.8861
Epoch 29/30
15000/15000 [==============================] - 3s 188us/sample - loss: 0.1396 - acc: 0.9577 - val_loss: 0.2877 - val_acc: 0.8865
Epoch 30/30
15000/15000 [==============================] - 3s 173us/sample - loss: 0.1337 - acc: 0.9602 - val_loss: 0.2896 - val_acc: 0.8861
[0.3057033189201355, 0.876]
2023-03-16 11:44:57.603723: W tensorflow/c/c_api.cc:300] Operation '{name:'dense_8/Sigmoid' id:366 op device:{requested: '', assigned: ''} def:{{{node dense_8/Sigmoid}} = Sigmoid[T=DT_FLOAT, _has_manual_control_dependencies=true](dense_8/BiasAdd)}}' was changed by setting attribute after it was run by a session. This mutation will have no effect, and will trigger an error in the future. Either don't modify nodes after running them or create a new session.
Review: <START> please give this one a miss br br <UNK> <UNK> and the rest of the cast rendered terrible performances the show is flat flat flat br br i don't know how michael madison could have allowed this one on his plate he almost seemed to know this wasn't going to work out and his performance was quite <UNK> so all you madison fans give this a miss
Prediction / Value: 0.11655851 / 0
Correct prediction
Review: a lot of patience because it focuses on mood and character development the plot is very simple and many of the scenes take place on the same set in frances <UNK> the sandy dennis character apartment but the film builds to a disturbing climax br br the characters create an atmosphere <UNK> with sexual tension and psychological <UNK> it's very interesting that robert altman directed this considering the style and structure of his other films still the trademark altman audio style is evident here and there i think what really makes this film work is the brilliant performance by sandy dennis it's definitely one of her darker characters but she plays it so perfectly and convincingly that it's scary michael burns does a good job as the mute young man regular altman player michael murphy has a small part the <UNK> moody set fits the content of the story very well in short this movie is a powerful study of loneliness sexual <UNK> and desperation be patient <UNK> up the atmosphere and pay attention to the wonderfully written script br br i praise robert altman this is one of his many films that deals with unconventional fascinating subject matter this film is disturbing but it's sincere and it's sure to <UNK> a strong emotional response from the viewer if you want to see an unusual film some might even say bizarre this is worth the time br br unfortunately it's very difficult to find in video stores you may have to buy it off the internet
Prediction / Value: 0.9985179 / 1
Correct prediction
Review: no improvement and demand a different king irritated <UNK> sends them a <UNK> br br delighted with this <UNK> looking new king who towers above them the <UNK> welcome him with a <UNK> of <UNK> dressed <UNK> the mayor steps forward to hand him the key to the <UNK> as <UNK> cameras record the event to everyone's horror the <UNK> promptly eats the mayor and then goes on a merry rampage <UNK> citizens at random a title card <UNK> reads news of the king's <UNK> throughout the kingdom when the now terrified <UNK> once more <UNK> <UNK> for help he loses his temper and <UNK> their community with lightning <UNK> the moral of our story delivered by a hapless frog just before he is eaten is let well enough alone br br considering the time period when this startling little film was made and considering the fact that it was made by a russian <UNK> at the height of that <UNK> country's civil war it would be easy to see this as a <UNK> about those events <UNK> may or may not have had <UNK> turmoil in mind when he made <UNK> but whatever <UNK> his choice of material the film stands as a <UNK> tale of universal <UNK> <UNK> could be the soviet union italy germany or japan in the 1930s or any country of any era that lets its guard down and is overwhelmed by <UNK> it's a fascinating film even a charming one in its macabre way but its message is no joke
Prediction / Value: 0.7573268 / 1
Correct prediction
Review: <START> i generally love this type of movie however this time i found myself wanting to kick the screen since i can't do that i will just complain about it this was absolutely idiotic the things that happen with the dead kids are very cool but the alive people are absolute idiots i am a grown man pretty big and i can defend myself well however i would not do half the stuff the little girl does in this movie also the mother in this movie is reckless with her children to the point of neglect i wish i wasn't so angry about her and her actions because i would have otherwise enjoyed the flick what a number she was take my advise and fast forward through everything you see her do until the end also is anyone else getting sick of watching movies that are filmed so dark anymore one can hardly see what is being filmed as an audience we are <UNK> involved with the actions on the screen so then why the hell can't we have night vision
Prediction / Value: 0.5591276 / 0
Incorrect prediction
Review: <START> like some other people wrote i'm a die hard mario fan and i loved this game br br this game starts slightly boring but trust me it's worth it as soon as you start your hooked the levels are fun and <UNK> they will hook you <UNK> your mind turns to <UNK> i'm not kidding this game is also <UNK> and is beautifully done br br to keep this spoiler free i have to keep my mouth shut about details but please try this game it'll be worth it br br story 9 9 action 10 1 it's that good <UNK> 10 attention <UNK> 10 average 10
Prediction / Value: 0.9897942 / 1
Correct prediction
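Going the other way, from raw text to model input, just inverts decode_review: look each word up in word_index, substitute <UNK> for anything out of vocabulary, and pad to the same length. A minimal sketch; the encode_review helper and the sample sentence are illustrative additions, not from the original post:

# Sketch: encode a new review the same way the training data was encoded.
def encode_review(text):
    ids = [word_index["<START>"]]
    for w in text.lower().split():
        idx = word_index.get(w, word_index["<UNK>"])
        # Indices >= vocab_size were cut off by num_words=10000 during loading
        ids.append(idx if idx < vocab_size else word_index["<UNK>"])
    return ids

sample = keras.preprocessing.sequence.pad_sequences(
    [encode_review("this movie was a wonderful surprise")],
    value=word_index["<PAD>"], padding='post', maxlen=256)
print(model.predict(sample)[0][0])  # values near 1.0 mean positive sentiment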