library(processpredictR)
library(bupaR)
library(keras)
library(purrr)
We can also opt for setting up and training our model manually, instead of using the provided processpredictR methods. Note that after defining a model with keras::keras_model() the model is no longer of class ppred_model (as opposed to the result of processpredictR::create_model()).
The layers on top of a custom model (previously defined when adapting your model) can be added as follows:
new_outputs <- custom_model$model$output %>% # custom_model$model to access the model and $output to access its outputs
  keras::layer_dropout(rate = 0.1) %>%
  keras::layer_dense(units = custom_model$num_outputs, activation = 'softmax')

custom_model <- keras::keras_model(inputs = custom_model$model$input,
                                   outputs = new_outputs,
                                   name = "new_custom_model")
custom_model
#> Model: "new_custom_model"
#> ________________________________________________________________________________
#> Layer (type)                                  Output Shape        Param #
#> ================================================================================
#> input_2 (InputLayer)                          [(None, 9)]         0
#> token_and_position_embedding_1                (None, 9, 36)       828
#>   (TokenAndPositionEmbedding)
#> transformer_block_1 (TransformerBlock)        (None, 9, 36)       26056
#> global_average_pooling1d_1                    (None, 36)          0
#>   (GlobalAveragePooling1D)
#> dropout_6 (Dropout)                           (None, 36)          0
#> dense_6 (Dense)                               (None, 64)          2368
#> dropout_8 (Dropout)                           (None, 64)          0
#> dense_8 (Dense)                               (None, 11)          715
#> ================================================================================
#> Total params: 29,967
#> Trainable params: 29,967
#> Non-trainable params: 0
#> ________________________________________________________________________________
# class of the model
custom_model %>% class
#> [1] "keras.engine.functional.Functional"
#> [2] "keras.engine.training.Model"
#> [3] "keras.engine.base_layer.Layer"
#> [4] "tensorflow.python.module.module.Module"
#> [5] "tensorflow.python.trackable.autotrackable.AutoTrackable"
#> [6] "tensorflow.python.trackable.base.Trackable"
#> [7] "keras.utils.version_utils.LayerVersionSelector"
#> [8] "keras.utils.version_utils.ModelVersionSelector"
#> [9] "python.builtin.object"
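For comparison: a model set up with processpredictR keeps the ppred_model class, so the package's own fit(), predict() and evaluate() methods still dispatch on it. A minimal sketch, assuming create_model() is called on the training set as earlier in this article:
# hypothetical comparison: create_model() returns a ppred_model (a list),
# unlike the keras_model() object above
ppred <- create_model(df$train_df)
ppred %>% class # expected to include "ppred_model"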
processpredictR::tokenize()
Before training the model we first must prepare the data. processpredictR provides the tokenize() function, but other alternatives (see working with preprocessing layers) can also be used; a sketch of one such alternative follows the example below.
# the trace of activities must be tokenized
tokens_train <- df$train_df %>% tokenize()
map(tokens_train, head) # the output of tokenize() is a list
#> $token_x
#> $token_x[[1]]
#> [1] 2
#>
#> $token_x[[2]]
#> [1] 2 3
#>
#> $token_x[[3]]
#> [1] 2
#>
#> $token_x[[4]]
#> [1] 2 4
#>
#> $token_x[[5]]
#> [1] 2 4 5
#>
#> $token_x[[6]]
#> [1] 2 4 5 6
#>
#>
#> $numeric_features
#> NULL
#>
#> $categorical_features
#> NULL
#>
#> $token_y
#> [1] 0 1 2 3 4 5
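As an alternative to tokenize(), a keras preprocessing layer can produce comparable integer tokens. A minimal sketch, not part of processpredictR, assuming the traces are available as whitespace-separated activity strings (the traces vector below is hypothetical, and activity labels containing spaces would need a custom split):
# hypothetical alternative using a keras preprocessing layer
traces <- c("register", "register analyze", "register analyze decide")
vectorizer <- layer_text_vectorization(
  output_mode = "int",             # one integer token per activity
  output_sequence_length = 9       # pad/truncate to the maximal case length
)
adapt(vectorizer, traces)          # learn the vocabulary from the training traces
vectorizer(traces)                 # padded integer sequences, analogous to token_x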
Padding of sequences is required to make every input sequence of equal length.
# make sequences of equal length
x <- tokens_train$token_x %>% pad_sequences(maxlen = max_case_length(df$train_df), value = 0)
y <- tokens_train$token_y
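To illustrate what pad_sequences() does here, a small standalone example with arbitrary token values: by default it pre-pads each sequence with value until every row has length maxlen, returning a matrix.
# standalone illustration with arbitrary values
pad_sequences(list(c(2), c(2, 4, 5)), maxlen = 4, value = 0)
# rows should come out as 0 0 0 2 and 0 2 4 5 (padding is prepended by default)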
Note that keras::keras_model() does not return a list (unlike processpredictR::create_model()), so we simply refer to the model as an object.
Do not forget to compile the model.
# compile
compile(object = custom_model, optimizer = "adam",
loss = loss_sparse_categorical_crossentropy(),
metrics = metric_sparse_categorical_crossentropy())
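Equivalently, compile() can be piped; it modifies the keras model in place, so the result does not need to be reassigned:
custom_model %>% compile(
  optimizer = "adam",
  loss = loss_sparse_categorical_crossentropy(),
  metrics = metric_sparse_categorical_crossentropy()
)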
We are now ready to train our custom model (the code below is not being evaluated).
# train
fit(object = custom_model, x, y, epochs = 10, batch_size = 10)
# see also ?keras::fit.keras.engine.training.Model
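fit() also accepts the usual keras training arguments. A minimal sketch (not from the original example) that holds out part of the training data for validation and stops early when the validation loss stops improving:
# hypothetical variant with a validation split and early stopping
fit(object = custom_model, x, y, epochs = 10, batch_size = 10,
    validation_split = 0.2,
    callbacks = list(callback_early_stopping(monitor = "val_loss", patience = 3)))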
# predict
tokens_test <- df$test_df %>% tokenize()
x <- tokens_test$token_x %>% pad_sequences(maxlen = max_case_length(df$train_df), value = 0)
predict(custom_model, x)
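predict() returns a matrix of softmax probabilities with one column per output token. A small sketch, assuming the zero-based token encoding shown in token_y above, to turn these into predicted next tokens:
# sketch: take the most probable column per prefix; subtracting 1 yields the
# zero-based token ids used by tokenize()
probs <- predict(custom_model, x)
predicted_tokens <- apply(probs, 1, which.max) - 1
head(predicted_tokens)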
# evaluate
tokens_test <- df$test_df %>% tokenize()
x <- tokens_test$token_x %>% pad_sequences(maxlen = max_case_length(df$train_df), value = 0) # pad, as for training
# y holds integer token labels for this classification task; normalization
# (e.g. dividing y_test by the standard deviation of y_train) only applies to
# regression tasks such as remaining-time prediction
y <- tokens_test$token_y
evaluate(custom_model, x, y)