@@ -117,6 +117,9 @@ import string
import re
import zipfile
import os
+
+# Creating the random instance
+rng = np.random.default_rng()
```

Next, you'll define a set of text preprocessing helper functions.
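Since the generator now lives at the top of the notebook, note that `np.random.default_rng()` seeds itself from the operating system, so every run draws different numbers. If you want reproducible runs, you can pass an explicit seed; a minimal sketch (the seed value here is arbitrary):

``` python
import numpy as np

# An explicit seed makes every run draw the same random numbers,
# which helps when comparing training runs.
rng = np.random.default_rng(seed=42)

print(rng.standard_normal(size=(2, 2)))  # identical output on every run
```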
@@ -458,20 +461,20 @@ Let's start with writing a function to randomly initialize the parameters which w
``` python
def initialise_params(hidden_dim, input_dim):
    # forget gate
-    Wf = np.random.randn(hidden_dim, hidden_dim + input_dim)
-    bf = np.random.randn(hidden_dim, 1)
+    Wf = rng.standard_normal(size=(hidden_dim, hidden_dim + input_dim))
+    bf = rng.standard_normal(size=(hidden_dim, 1))

    # input gate
-    Wi = np.random.randn(hidden_dim, hidden_dim + input_dim)
-    bi = np.random.randn(hidden_dim, 1)
+    Wi = rng.standard_normal(size=(hidden_dim, hidden_dim + input_dim))
+    bi = rng.standard_normal(size=(hidden_dim, 1))

    # candidate memory gate
-    Wcm = np.random.randn(hidden_dim, hidden_dim + input_dim)
-    bcm = np.random.randn(hidden_dim, 1)
+    Wcm = rng.standard_normal(size=(hidden_dim, hidden_dim + input_dim))
+    bcm = rng.standard_normal(size=(hidden_dim, 1))

    # output gate
-    Wo = np.random.randn(hidden_dim, hidden_dim + input_dim)
-    bo = np.random.randn(hidden_dim, 1)
+    Wo = rng.standard_normal(size=(hidden_dim, hidden_dim + input_dim))
+    bo = rng.standard_normal(size=(hidden_dim, 1))


    # fully connected layer for classification
-    W2 = np.random.randn(1, hidden_dim)
+    W2 = rng.standard_normal(size=(1, hidden_dim))

    b2 = np.zeros((1, 1))

    parameters = {
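To sanity-check the shapes, you can call the function with small dimensions. The sketch below assumes the truncated `parameters` dict above is keyed by the variable names (e.g. `"Wf"`) and returned from the function; adjust the keys if the notebook uses different ones:

``` python
params = initialise_params(hidden_dim=4, input_dim=3)

# Gate weights act on the stacked [h_prev; x_t] vector,
# so their shape is (hidden_dim, hidden_dim + input_dim).
assert params["Wf"].shape == (4, 4 + 3)  # assumed key name
assert params["bf"].shape == (4, 1)      # assumed key name
assert params["W2"].shape == (1, 4)      # assumed key name
```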
@@ -575,7 +578,7 @@ def forward_prop(X_vec, parameters, input_dim):
        # Retrieve word corresponding to current time step
        x = X_vec[t]
        # Retrieve the embedding for the word and reshape it to make the LSTM happy
-        xt = emb_matrix.get(x, np.random.rand(input_dim, 1))
+        xt = emb_matrix.get(x, rng.random(size=(input_dim, 1)))
        xt = xt.reshape((input_dim, 1))

        # Input to the gates is concatenated previous hidden state and current word embedding
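The comment at the end of the hunk refers to the usual LSTM formulation: the previous hidden state is stacked on top of the current embedding, which is why each gate above gets a single weight matrix of shape `(hidden_dim, hidden_dim + input_dim)`. A minimal sketch of that step, assuming `h_prev` and a `sigmoid` helper are defined earlier in `forward_prop`:

``` python
# Stack previous hidden state and current embedding into one column vector
concat = np.concatenate((h_prev, xt), axis=0)  # shape: (hidden_dim + input_dim, 1)

# Each gate is then one matrix multiply plus bias, e.g. the forget gate:
ft = sigmoid(parameters["Wf"] @ concat + parameters["bf"])  # "Wf"/"bf" keys assumed
```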