Neural networks are an awesome modeling technique for finding complex patterns in our data. Most neural network R packages have levers for customizing the model architecture. For example, the R package RSNNS (an interface to the Stuttgart Neural Network Simulator) contains a multi-argument function called mlp. The mlp function creates a multi-layer perceptron based on the argument values provided by the developer. If we want to build the best model possible, we need a way to test the model architecture. One option is to run the function manually and try different sets of configurations until we find the best model. Another, more automated, way of parameter evaluation is to randomly generate configuration sets.
Lots of parameters to choose from:
mlp(x, y, size = c(5), maxit = 100,
initFunc = "Randomize_Weights", initFuncParams = c(-0.3, 0.3),
learnFunc = "Std_Backpropagation", learnFuncParams = c(0.2, 0),
updateFunc = "Topological_Order", updateFuncParams = c(0),
hiddenActFunc = "Act_Logistic", shufflePatterns = TRUE, linOut = FALSE,
outputActFunc = if (linOut) "Act_Identity" else "Act_Logistic",
inputsTest = NULL, targetsTest = NULL, pruneFunc = NULL,
pruneFuncParams = NULL, ...)
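To make that concrete, a call that changes just a few of those levers might look like the snippet below. This is only an illustrative sketch using the built-in iris data set (not the HR data used later in this post), and the parameter values are arbitrary choices.

library(RSNNS)

# Illustrative sketch only: iris stands in for real data; the parameter values are arbitrary
data(iris)
iris <- iris[sample(nrow(iris)), ]                  # shuffle the rows
iris.x <- normalizeData(iris[, 1:4], type = "0_1")  # scale inputs to [0, 1]
iris.y <- decodeClassLabels(iris[, 5])              # one-hot encode the target

my.model <- mlp(x = iris.x, y = iris.y,
                size = c(8),              # one hidden layer with 8 neurons
                maxit = 200,              # training iterations
                learnFunc = "Rprop",      # alternative to the default Std_Backpropagation
                learnFuncParams = c(0.1))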
A technique I found to be useful is randomly generating the configurations using either sample() or a random uniform distribution (runif()). Then set the function inside a loop and run many simulations with many configuration sets; a sketch of the idea follows below. R has a popular package called "foreach" which can be used for our loop. An added benefit of the foreach package is its compatibility with Azure: if the developer specifies %dopar% instead of %do%, the workload will execute against a specified number of VMs in Azure. Running the workload against Azure can be dramatically faster; in some cases I have experienced a 10x reduction in processing time.
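Here is a rough sketch of that loop, run locally with %do% on the iris data and with arbitrary parameter ranges. Each pass draws a random configuration, trains a network, and records the configuration alongside its error; swap in %dopar% with a registered parallel backend to fan the iterations out.

library(foreach)
library(RSNNS)

# Sketch only: %do% runs locally; use %dopar% with a registered parallel backend for scale
data(iris)
x <- normalizeData(iris[, 1:4], type = "0_1")
y <- decodeClassLabels(iris[, 5])

results <- foreach(i = 1:10, .combine = "rbind", .packages = "RSNNS") %do% {
  size  <- sample(4:16, 1)      # random hidden layer size
  iters <- sample(50:200, 1)    # random number of training iterations
  lr    <- runif(1, 0.1, 0.5)   # random learning rate
  fit   <- mlp(x, y, size = size, maxit = iters, learnFuncParams = c(lr))
  err   <- sqrt(sum((predict(fit, x) - y) ^ 2))   # in-sample error, for illustration only
  c(size = size, maxit = iters, learningRate = lr, error = err)
}
head(results)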
The developer needs to set up an Azure Batch services account and a storage account before they can use the %dopar% functionality. After that, it's only a few quick configuration steps which can be run inside a local R session. Here are 20 single-core machines in the boot process:
These were configured directly from my R session.
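For reference, those configuration steps look roughly like the sketch below when run from a local R session (function names from the doAzureParallel package; the two JSON files hold the Batch/storage credentials and the pool definition, e.g. twenty single-core VMs).

library(doAzureParallel)

# Template config files; fill in your Batch and storage account details before use
generateCredentialsConfig("credentials.json")
generateClusterConfig("pool_config.json")   # pool size and VM size are set here, e.g. 20 x 1-core

setCredentials("credentials.json")          # authenticate against Azure
pool <- makeCluster("pool_config.json")     # boot the VMs
registerDoAzureParallel(pool)               # %dopar% now targets the Azure pool
getDoParWorkers()                           # confirm the worker count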
Here is a sample script for testing the service.
install.packages("RSNNS")
install.packages("clusterSim")
install.packages("dplyr")
install.packages("curl")
install.packages("httr")
install.packages("devtools")
install.packages("bitops")
install.packages("party")
install.packages("rpart")
install.packages("e1071")
install.packages("scales")
install.packages("reshape")
install.packages("ggplot2")
library(devtools) # Use Dev tools to install from github
install_github("Azure/rAzureBatch")
install_github("Azure/doAzureParallel")
sessionInfo()
.libPaths("D:\\Rpackages")
# 1) Import Data
getwd()
setwd('C:\\Users\\mshar\\Desktop\\SQL_Saturday')
#data from kaggle.com
my.data <- read.csv('hr_comma_sep.csv')

# take a look
head(my.data)
class(my.data)
nrow(my.data)

# data prep
my.target <- my.data$left
my.inputs <- my.data[, -10]                        # removes the target variable
my.numericCol <- sapply(my.inputs, is.numeric)     # a data frame is a list
my.numericInputs <- my.inputs[, my.numericCol]
my.nominalInputs <- my.inputs[, my.numericCol == FALSE]
my.rawInputs <- my.numericInputs[, 3:5]

library(clusterSim)
my.normInputs <- data.Normalization(my.rawInputs, type = "n1", normalization = "column")

library(RSNNS)
my.nominalInputsBool <- apply(my.nominalInputs, 2, decodeClassLabels)
my.data <- cbind(my.numericInputs[, 1:2], my.normInputs, my.nominalInputsBool, my.target)
#write.csv(my.finalData, file = "C:\\Users\\mshar\\Desktop\\SQL_Saturday\\cleanData.csv")

# shuffle, then split into training and test sets
my.data <- my.data[sample(1:nrow(my.data), length(1:nrow(my.data))), 1:ncol(my.data)]
my.splitData <- splitForTrainingAndTest(my.data[, 1:18], my.data[, 19], ratio = .33)

my.model <- mlp(x = my.splitData$inputsTrain, y = my.splitData$targetsTrain,
                size = c(5), maxit = 50,
                learnFuncParams = c(0.2, 0.0), learnFunc = "Std_Backpropagation")

# testing
my.prediction <- predict(my.model, my.splitData$inputsTest)
my.error <- sqrt(sum((my.prediction - my.splitData$targetsTest) ^ 2))   # RMSE
my.prediction <- ifelse(my.prediction >= .9, 1, 0)
my.output <- cbind(my.prediction, my.splitData$targetsTest)

# evaluation
confusionMatrix(my.splitData$targetsTest, my.prediction)
my.TruePositiveRate <- sum(my.output[my.splitData$targetsTest == 1, 1]) / sum(my.output[, 2])
my.TrueNegativeRate <- sum(ifelse(my.output[my.splitData$targetsTest == 0, 1] == 0, 1, 0)) /
  (nrow(my.output) - sum(my.output[, 2]))
my.error

library(scales)
library(reshape)
library(devtools)
library(ggplot2)
plot.nnet(my.model)   # plot.nnet is an external helper sourced separately, not part of RSNNS

# performs well, but we can tune the network architecture:
# 1. use intuition and research to find the most optimal configuration set, or
# 2. randomly generate configuration sets and run the mlp function with the random configs in a loop
neuralNetworkSimulation <- function(my.maxit, my.numHidddenNeurons, my.learningRate, my.learnFunctionSwitch) {
  #----------------------------------------------------
  my.learnFunction <- switch(my.learnFunctionSwitch,
                             "Std_Backpropagation", "BackpropBatch", "BackpropChunk",
                             "BackpropMomentum", "BackpropWeightDecay", "Rprop",
                             "Quickprop", "SCG")
  startDate <- Sys.time()
  my.model <- mlp(x = my.splitData$inputsTrain, y = my.splitData$targetsTrain,
                  size = my.numHidddenNeurons, maxit = my.maxit,
                  learnFuncParams = c(my.learningRate), learnFunc = my.learnFunction)
  my.executionTime <- difftime(Sys.time(), startDate, units = "secs")

  my.prediction <- predict(my.model, my.splitData$inputsTest)
  my.error <- sqrt(sum((my.prediction - my.splitData$targetsTest) ^ 2))   # RMSE
  my.prediction <- ifelse(my.prediction >= .9, 1, 0)
  my.output <- cbind(my.prediction, my.splitData$targetsTest)
  my.TruePositiveRate <- sum(my.output[my.splitData$targetsTest == 1, 1]) / sum(my.output[, 2])
  my.TrueNegativeRate <- sum(ifelse(my.output[my.splitData$targetsTest == 0, 1] == 0, 1, 0)) /
    (nrow(my.output) - sum(my.output[, 2]))

  my.results <- cbind(iterations = my.maxit,
                      hiddenLayerNeuronsL1 = my.numHidddenNeurons,
                      learningFunction = my.learnFunction,
                      learningRate = my.learningRate,
                      test_RMSE = my.error,
                      truePositiveRate = my.TruePositiveRate,
                      trueNegativeRate = my.TrueNegativeRate,
                      executionTime = my.executionTime)
  return(my.results)
}

# single test run with a randomly generated configuration
neuralNetworkSimulation(my.maxit = sample(50:400, 1),
                        my.numHidddenNeurons = sample(4:16, 1),
                        my.learningRate = runif(1, .1, .5),
                        my.learnFunctionSwitch = sample(1:8, 1))

## execute the simulation to analyze the optimal network configurations
## resource intensive as the data grows; run against the doAzureParallel backend
## for scale and many simulations at once
library(doAzureParallel)
generateClusterConfig("pool_config.json")       # generate the pool config file
generateCredentialsConfig("credentials.json")   # generate the credentials file
setCredentials("credentials.json")
pool <- makeCluster("pool_config.json")         # create the cluster
registerDoAzureParallel(pool)                   # register the backend
getDoParWorkers()                               # validate the cluster is configured

# wrapper that draws one random configuration and runs a single simulation
invokeNeuralNetwork <- function() {
  neuralNetworkSimulation(my.maxit = sample(50:400, 1),
                          my.numHidddenNeurons = sample(4:16, 1),
                          my.learningRate = runif(1, .1, .5),
                          my.learnFunctionSwitch = sample(1:8, 1))
}

parStartDate <- Sys.time()
my.packages <- c("RSNNS")
my.iterations <- 1000
my.simulation <- foreach(i = 1:my.iterations, .combine = "c", .packages = my.packages) %dopar% {
  invokeNeuralNetwork()
}
parExecutionTime <- difftime(Sys.time(), parStartDate, units = "mins")

# write the results to a flat file
my.simulationStg <- matrix(my.simulation, nrow = my.iterations, ncol = 8, byrow = TRUE)
df <- data.frame(my.simulationStg)
colnames(df) <- c("iterations", "hiddenLayerNeuronsL1", "learningFunction", "learningRate",
                  "test_RMSE", "truePositiveRate", "trueNegativeRate", "executionTime")
write.csv(df, file = "...")   # output path truncated in the original

# shut down the cluster
stopCluster(pool)
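Once the simulation finishes and the flat file is written, a quick way to pick a winner is to sort the result frame by test error. A small sketch using the df built above:

# Sketch: rank the simulated configurations by test error to surface the best architectures
df$test_RMSE <- as.numeric(as.character(df$test_RMSE))
head(df[order(df$test_RMSE), ], 10)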