finished project

master
Chakib Benziane 7 years ago
parent d452dd8b78
commit 0d4065d97a

@ -0,0 +1,3 @@
save.* filter=lfs diff=lfs merge=lfs -text
*.p filter=lfs diff=lfs merge=lfs -text
**/data filter=lfs diff=lfs merge=lfs -text

@ -15,7 +15,7 @@
},
{
"cell_type": "code",
"execution_count": 54,
"execution_count": 64,
"metadata": {
"collapsed": false,
"deletable": true,
@ -47,7 +47,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 14,
"metadata": {
"collapsed": false,
"deletable": true,
@ -126,7 +126,7 @@
},
{
"cell_type": "code",
"execution_count": 47,
"execution_count": 65,
"metadata": {
"collapsed": false,
"deletable": true,
@ -192,7 +192,7 @@
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 16,
"metadata": {
"collapsed": false,
"deletable": true,
@ -246,7 +246,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 17,
"metadata": {
"collapsed": false,
"deletable": true,
@ -294,16 +294,21 @@
},
{
"cell_type": "markdown",
"metadata": {},
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"### Extra hyper parameters"
]
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 2,
"metadata": {
"collapsed": false
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
@ -345,21 +350,17 @@
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"TensorFlow Version: 1.0.0\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/spike/.pyenv/versions/3.5.1/envs/ml/lib/python3.5/site-packages/ipykernel/__main__.py:14: UserWarning: No GPU found. Please use a GPU to train your neural network.\n"
"TensorFlow Version: 1.0.0\n",
"Default GPU Device: /gpu:0\n"
]
}
],
@ -384,7 +385,10 @@
},
{
"cell_type": "markdown",
"metadata": {},
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"### Input\n",
"Implement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:\n",
@ -397,7 +401,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 66,
"metadata": {
"collapsed": false,
"deletable": true,
@ -420,10 +424,10 @@
" \"\"\"\n",
" \n",
" # We use shape [None, None] to feed any batch size and any sequence length\n",
" input_placeholder = tf.placeholder(tf.int32, [None, None],name='input')\n",
" input_placeholder = tf.placeholder(tf.int64, [None, None],name='input')\n",
" \n",
" # Targets are [batch_size, seq_length]\n",
" targets_placeholder = tf.placeholder(tf.int32, [None, None]) \n",
" targets_placeholder = tf.placeholder(tf.int64, [None, None]) \n",
" \n",
" \n",
" learning_rate_placeholder = tf.placeholder(tf.float32)\n",
@ -454,7 +458,7 @@
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 67,
"metadata": {
"collapsed": false,
"deletable": true,
@ -511,7 +515,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 6,
"metadata": {
"collapsed": false,
"deletable": true,
@ -567,7 +571,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 68,
"metadata": {
"collapsed": false,
"deletable": true,
@ -624,7 +628,7 @@
},
{
"cell_type": "code",
"execution_count": 38,
"execution_count": 157,
"metadata": {
"collapsed": false,
"deletable": true,
@ -635,6 +639,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"logits after reshape: Tensor(\"logits:0\", shape=(128, 5, 27), dtype=float32)\n",
"Tests Passed\n"
]
}
@ -651,7 +656,30 @@
" \"\"\"\n",
" \n",
" num_outputs = vocab_size\n",
" batch_size = input_data.get_shape().as_list()[0]\n",
" \n",
" \n",
" ## Not sure why the unit test was made without taking into \n",
" # account we are handling dynamic tensor shape that we need to infer\n",
" # at runtime, so I made an if statement just to pass the test case\n",
" #\n",
" # Some references: https://goo.gl/vD3egn\n",
" # https://goo.gl/E8vT2M \n",
" \n",
" if input_data.get_shape().as_list()[1] is not None:\n",
" batch_size = input_data.get_shape().as_list()[0]\n",
" seq_len = input_data.get_shape().as_list()[1]\n",
" \n",
" # Infer dynamic tensor shape of input\n",
" else:\n",
" input_dims = tf.shape(input_data)\n",
" batch_size = input_dims[0]\n",
" seq_len = input_dims[1]\n",
"\n",
" ###############\n",
" # This enables test passing\n",
" ###############\n",
" \n",
"\n",
" \n",
" embed = get_embed(input_data, vocab_size, HYPER.embedding_size)\n",
" \n",
@ -665,21 +693,23 @@
" \n",
" # Put outputs in rows\n",
" # make the output into [batch_size*time_step, rnn_size] for easy matmul\n",
" outputs = tf.reshape(raw_rnn_outputs, [-1, rnn_size])\n",
" outputs = tf.reshape(raw_rnn_outputs, [-1, rnn_size], name='rnn_output')\n",
" \n",
" \n",
" # Question, why are we using linear activation and not softmax ?\n",
" # My Guess: because seq2seq.sequence_loss has an efficient way to calculate the loss directly from logits \n",
" with tf.variable_scope('linear_layer'):\n",
" linear_w = tf.Variable(tf.truncated_normal((rnn_size, num_outputs), stddev=0.1), name='linear_w')\n",
" linear_w = tf.Variable(tf.truncated_normal((rnn_size, num_outputs), stddev=0.05), name='linear_w')\n",
" linear_b = tf.Variable(tf.zeros(num_outputs), name='linear_b')\n",
" \n",
" logits = tf.matmul(outputs, linear_w) + linear_b\n",
" \n",
" \n",
" \n",
" # Reshape the logits back into the original input shape -> [batch_size, seq_len, num_classes]\n",
" # We do this beceause the loss function seq2seq.sequence_loss takes as logits a shape of [batch_size,seq_len,num_decoded_symbols]\n",
" logits = tf.reshape(logits, [batch_size, -1, num_outputs])\n",
" \n",
" logits = tf.reshape(logits, [batch_size, seq_len, num_outputs], name='logits')\n",
" print('logits after reshape: ', logits)\n",
" \n",
" return logits, final_state\n",
"\n",
@ -728,45 +758,7 @@
},
{
"cell_type": "code",
"execution_count": 141,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Stored '_input' (ndarray)\n",
"Stored '_target' (ndarray)\n",
"Stored 'test_int_text' (list)\n"
]
}
],
"source": [
"batch_size = 128\n",
"seq_length = 5\n",
"slice_size = batch_size * seq_length\n",
"test_int_text = list(range(1000*seq_length))\n",
"n_batches = int(len(test_int_text)/slice_size)\n",
"\n",
"# input part\n",
"_input = np.array(int_text[:n_batches*slice_size])\n",
"\n",
"# target part\n",
"_target = np.array(int_text[1:n_batches*slice_size + 1])\n",
"\n",
"%store _input\n",
"%store _target\n",
"%store test_int_text\n",
"\n",
"for b in range(n_batches):\n",
" print \n"
]
},
{
"cell_type": "code",
"execution_count": 174,
"execution_count": 158,
"metadata": {
"collapsed": false,
"deletable": true,
@ -774,17 +766,10 @@
},
"outputs": [
{
"ename": "AttributeError",
"evalue": "'list' object has no attribute 'shape'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-174-903ff1c73bcc>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0mDON\u001b[0m\u001b[0;31m'\u001b[0m\u001b[0mT\u001b[0m \u001b[0mMODIFY\u001b[0m \u001b[0mANYTHING\u001b[0m \u001b[0mIN\u001b[0m \u001b[0mTHIS\u001b[0m \u001b[0mCELL\u001b[0m \u001b[0mTHAT\u001b[0m \u001b[0mIS\u001b[0m \u001b[0mBELOW\u001b[0m \u001b[0mTHIS\u001b[0m \u001b[0mLINE\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 50\u001b[0m \"\"\"\n\u001b[0;32m---> 51\u001b[0;31m \u001b[0mtests\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtest_get_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m/home/spike/ml/udacity/nd101/deep-learning-modified/tv-script-generation/problem_unittests.py\u001b[0m in \u001b[0;36mtest_get_batches\u001b[0;34m(get_batches)\u001b[0m\n\u001b[1;32m 77\u001b[0m \u001b[0mtest_seq_length\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m5\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 78\u001b[0m \u001b[0mtest_int_text\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1000\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mtest_seq_length\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 79\u001b[0;31m \u001b[0mbatches\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_int_text\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_batch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_seq_length\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 80\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 81\u001b[0m \u001b[0;31m# Check type\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-174-903ff1c73bcc>\u001b[0m in \u001b[0;36mget_batches\u001b[0;34m(int_text, batch_size, seq_length)\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 39\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvectorize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_input\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_target\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 40\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-174-903ff1c73bcc>\u001b[0m in \u001b[0;36mvectorize\u001b[0;34m(_inputs, _targets)\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0;31m# Go through all inputs, targets and split them into batch_size*seq\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtargets\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mseq_length\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_targets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mseq_length\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 26\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 27\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0;31m# Stack inputs and targets into batch_size * seq_length\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mAttributeError\u001b[0m: 'list' object has no attribute 'shape'"
"name": "stdout",
"output_type": "stream",
"text": [
"Tests Passed\n"
]
}
],
@ -802,40 +787,24 @@
" n_batches = int(len(int_text)/slice_size)\n",
" \n",
" # input part\n",
" _input = np.array(int_text[:n_batches*slice_size])\n",
" _inputs = np.array(int_text[:n_batches*slice_size])\n",
" \n",
" # target part\n",
" _target = np.array(int_text[1:n_batches*slice_size + 1])\n",
" \n",
" \n",
" def vectorize(_inputs, _targets):\n",
" # Takes flattened inputs and targets\n",
" # returns shape [n_batches, 2, batch_size, seq_length]\n",
" \n",
" # Go through all inputs, targets and split them into batch_size*seq list of items\n",
" # [batch*seq, batch*seq, ...]\n",
" inputs, targets = np.split(_inputs, batch_size*seq_length), np.split(_targets, batch_size*seq_length)\n",
" \n",
" # Reshape into [batch x seq, batch x seq, ...]\n",
" \n",
" # Stack inputs and targets into batch_size * seq_length \n",
" # Shape should become batch_size x seq_length\n",
" inputs, targets = np.stack(inputs), np.stack(targets)\n",
" \n",
" \n",
" # Stack Inputs and Targets\n",
" batches = np.concatenate((inputs, targets))\n",
" \n",
" return batch\n",
" _targets = np.array(int_text[1:n_batches*slice_size + 1])\n",
" \n",
"\n",
" # Go through all inputs, targets and split them into batch_size*seq_len list of items\n",
" # [batch, batch, ...]\n",
" inputs, targets = np.split(_inputs, n_batches), np.split(_targets, n_batches)\n",
" \n",
" result = vectorize(_input, _target)\n",
" # concat inputs and targets\n",
" batches = np.c_[inputs, targets]\n",
" #print(batches.shape)\n",
" \n",
" \n",
" # preare result as reference for target shape\n",
" #result = np.empty((n_batches, 2, batch_size, seq_length), dtype=np.int32)\n",
" # Reshape into final batches output\n",
" batches = batches.reshape((-1, 2, batch_size, seq_length))\n",
" \n",
" return None\n",
" return batches\n",
"\n",
"\n",
"\"\"\"\n",
@ -865,7 +834,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 164,
"metadata": {
"collapsed": true,
"deletable": true,
@ -874,17 +843,17 @@
"outputs": [],
"source": [
"# Number of Epochs\n",
"num_epochs = None\n",
"num_epochs = 100\n",
"# Batch Size\n",
"batch_size = None\n",
"batch_size = 128\n",
"# RNN Size\n",
"rnn_size = None\n",
"rnn_size = 256\n",
"# Sequence Length\n",
"seq_length = None\n",
"seq_length = 100\n",
"# Learning Rate\n",
"learning_rate = None\n",
"learning_rate = 1e-3\n",
"# Show stats for every n number of batches\n",
"show_every_n_batches = None\n",
"show_every_n_batches = 1\n",
"\n",
"\"\"\"\n",
"DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n",
@ -905,13 +874,43 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 77,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"6779"
]
},
"execution_count": 77,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"vocab_size"
]
},
{
"cell_type": "code",
"execution_count": 165,
"metadata": {
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"logits after reshape: Tensor(\"logits:0\", shape=(?, ?, 6779), dtype=float32)\n"
]
}
],
"source": [
"\"\"\"\n",
"DON'T MODIFY ANYTHING IN THIS CELL\n",
@ -944,6 +943,29 @@
" train_op = optimizer.apply_gradients(capped_gradients)"
]
},
{
"cell_type": "code",
"execution_count": 163,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"5"
]
},
"execution_count": 163,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"batches = get_batches(int_text, batch_size, seq_length)\n",
"len(batches)"
]
},
{
"cell_type": "markdown",
"metadata": {
@ -963,7 +985,66 @@
"deletable": true,
"editable": true
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 0 Batch 0/5 train_loss = 8.828\n",
"Epoch 0 Batch 1/5 train_loss = 8.793\n",
"Epoch 0 Batch 2/5 train_loss = 8.737\n",
"Epoch 0 Batch 3/5 train_loss = 8.602\n",
"Epoch 0 Batch 4/5 train_loss = 8.298\n",
"Epoch 1 Batch 0/5 train_loss = 7.938\n",
"Epoch 1 Batch 1/5 train_loss = 7.662\n",
"Epoch 1 Batch 2/5 train_loss = 7.364\n",
"Epoch 1 Batch 3/5 train_loss = 7.164\n",
"Epoch 1 Batch 4/5 train_loss = 6.899\n",
"Epoch 2 Batch 0/5 train_loss = 6.596\n",
"Epoch 2 Batch 1/5 train_loss = 6.462\n",
"Epoch 2 Batch 2/5 train_loss = 6.309\n",
"Epoch 2 Batch 3/5 train_loss = 6.330\n",
"Epoch 2 Batch 4/5 train_loss = 6.250\n",
"Epoch 3 Batch 0/5 train_loss = 6.055\n",
"Epoch 3 Batch 1/5 train_loss = 6.048\n",
"Epoch 3 Batch 2/5 train_loss = 6.012\n",
"Epoch 3 Batch 3/5 train_loss = 6.133\n",
"Epoch 3 Batch 4/5 train_loss = 6.159\n",
"Epoch 4 Batch 0/5 train_loss = 5.996\n",
"Epoch 4 Batch 1/5 train_loss = 6.021\n",
"Epoch 4 Batch 2/5 train_loss = 6.010\n",
"Epoch 4 Batch 3/5 train_loss = 6.125\n",
"Epoch 4 Batch 4/5 train_loss = 6.156\n",
"Epoch 5 Batch 0/5 train_loss = 5.978\n",
"Epoch 5 Batch 1/5 train_loss = 5.993\n",
"Epoch 5 Batch 2/5 train_loss = 5.977\n",
"Epoch 5 Batch 3/5 train_loss = 6.081\n",
"Epoch 5 Batch 4/5 train_loss = 6.103\n",
"Epoch 6 Batch 0/5 train_loss = 5.928\n",
"Epoch 6 Batch 1/5 train_loss = 5.950\n",
"Epoch 6 Batch 2/5 train_loss = 5.938\n",
"Epoch 6 Batch 3/5 train_loss = 6.053\n",
"Epoch 6 Batch 4/5 train_loss = 6.074\n",
"Epoch 7 Batch 0/5 train_loss = 5.909\n",
"Epoch 7 Batch 1/5 train_loss = 5.937\n",
"Epoch 7 Batch 2/5 train_loss = 5.925\n",
"Epoch 7 Batch 3/5 train_loss = 6.043\n",
"Epoch 7 Batch 4/5 train_loss = 6.060\n",
"Epoch 8 Batch 0/5 train_loss = 5.896\n",
"Epoch 8 Batch 1/5 train_loss = 5.922\n",
"Epoch 8 Batch 2/5 train_loss = 5.912\n",
"Epoch 8 Batch 3/5 train_loss = 6.028\n",
"Epoch 8 Batch 4/5 train_loss = 6.049\n",
"Epoch 9 Batch 0/5 train_loss = 5.889\n",
"Epoch 9 Batch 1/5 train_loss = 5.912\n",
"Epoch 9 Batch 2/5 train_loss = 5.906\n",
"Epoch 9 Batch 3/5 train_loss = 6.020\n",
"Epoch 9 Batch 4/5 train_loss = 6.042\n",
"Epoch 10 Batch 0/5 train_loss = 5.884\n",
"Epoch 10 Batch 1/5 train_loss = 5.905\n"
]
}
],
"source": [
"\"\"\"\n",
"DON'T MODIFY ANYTHING IN THIS CELL\n",
@ -1238,7 +1319,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.1"
"version": "3.5.2"
},
"toc": {
"colors": {

@ -0,0 +1,2 @@
model_checkpoint_path: "save"
all_model_checkpoint_paths: "save"

File diff suppressed because it is too large

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:43aa05ca53cc94bc12959afc391adedffb9df33345748d2a55f22d6c587bd8cf
size 21

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be490d22f681ecd706cf3a53a5e7ec52421d9eb7f11e4cdb612740f915b9eab7
size 391674
oid sha256:0c4525fb720ac816b5ee0720ef52e698cdab40204342e87096c2b4356b9829a8
size 387442

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0493768a7bb2c6cee62b851a65725d1a0b03e9845e0e8912efdef605566c426
size 49730300

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:848fb0f5cc5f656c5f5ced00df9f6ba77eef8d67cccef78a03daff24e8e746e3
size 981

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ab485075110bf5e5ddfc66a4de50319cc84615affd5098e605e34f1dd6f7bf9a
size 303291