Computes the regularized predicted response \(\hat{y}_{\delta_{lb},M}(x)\) and the mean squared error \(s^2_{\delta_{lb},M}(x)\) for a new set of inputs using the fitted GP model.

The value of M determines the number of iterations (or terms) in approximating \(R^{-1} \approx R^{-1}_{\delta_{lb},M}\). The iterative use of the nugget \(\delta_{lb}\), as outlined in Ranjan et al. (2011), is used in calculating \(\hat{y}_{\delta_{lb},M}(x)\) and \(s^2_{\delta_{lb},M}(x)\), where \(R_{\delta,M}^{-1} = \sum_{t = 1}^{M} \delta^{t - 1}(R+\delta I)^{-t}\).

# S3 method for class 'GP'
predict(object, xnew = object$X, M = 1, ...)

# S3 method for class 'GP'
fitted(object, ...)

Arguments

object

a class GP object estimated by GP_fit

xnew

the (n_new x d) design matrix of test points where model predictions and MSEs are desired

M

the number of iterations. See 'Details'

...

for compatibility with generic method predict

Value

Returns a list containing the predicted values (Y_hat), the mean squared errors of the predictions (MSE), and a matrix (complete_data) containing xnew, Y_hat, and MSE

Methods (by class)

  • GP: The predict method returns a list of elements Y_hat (the predicted values), MSE (the mean squared errors of the predictions), and complete_data (the matrix of independent variables, Y_hat, and MSE).

  • GP: The fitted method extracts the complete data.

References

Ranjan, P., Haynes, R., and Karsten, R. (2011). A Computationally Stable Approach to Gaussian Process Interpolation of Deterministic Computer Simulation Data, Technometrics, 53(4), 366 - 378.

See also

GP_fit for estimating the parameters of the GP model;
plot for plotting the predicted and error surfaces.

Author

Blake MacDonald, Hugh Chipman, Pritam Ranjan

Examples


## 1D Example
n <- 5
d <- 1
# 1D test function: a damped sinusoid plus a quartic trend,
# evaluated after rescaling the input from [0, 1] onto [0.5, 2.5].
computer_simulator <- function(x) {
    z <- 2 * x + 0.5
    sin(10 * pi * z) / (2 * z) + (z - 1)^4
}
set.seed(3)
library(lhs)
x <- maximinLHS(n,d)
y <- computer_simulator(x)
xvec <- seq(from = 0, to = 1, length.out = 10)
GPmodel <- GP_fit(x, y)
head(fitted(GPmodel))
#>         xnew.1       Y_hat          MSE
#> [1,] 0.3010048 -0.02851689 2.064100e-13
#> [2,] 0.7068071  0.58833268 1.322314e-13
#> [3,] 0.5114499 -0.14158528 9.352954e-14
#> [4,] 0.9735839  4.58957957 0.000000e+00
#> [5,] 0.1659417  0.50709615 4.515219e-14
lapply(predict(GPmodel, xvec), head)
#> $Y_hat
#> [1]  1.33458263  0.77315980  0.25039738 -0.09418317 -0.16999139 -0.10823943
#> 
#> $MSE
#> [1] 0.638163882 0.050885326 0.013089467 0.008366179 0.021953138 0.013297831
#> 
#> $complete_data
#>         xnew.1       Y_hat         MSE
#> [1,] 0.0000000  1.33458263 0.638163882
#> [2,] 0.1111111  0.77315980 0.050885326
#> [3,] 0.2222222  0.25039738 0.013089467
#> [4,] 0.3333333 -0.09418317 0.008366179
#> [5,] 0.4444444 -0.16999139 0.021953138
#> [6,] 0.5555556 -0.10823943 0.013297831
#> 


## 1D Example 2
n <- 7
d <- 1
# 1D test function: logarithmic trend with a superimposed sine wave.
computer_simulator <- function(x) {
    trend <- log(x + 0.1)
    wiggle <- sin(5 * pi * x)
    trend + wiggle
}
set.seed(1)
library(lhs)
x <- maximinLHS(n,d)
y <- computer_simulator(x)
xvec <- seq(from = 0,to = 1, length.out = 10)
GPmodel <- GP_fit(x, y)
head(fitted(GPmodel))
#>          xnew.1       Y_hat          MSE
#> [1,] 0.80738197  0.01850420 0.000000e+00
#> [2,] 0.30365073 -1.90556151 0.000000e+00
#> [3,] 0.46674581  0.29880194 7.623608e-16
#> [4,] 0.05515916 -1.10127666 0.000000e+00
#> [5,] 0.57334148  0.01111711 5.717706e-16
#> [6,] 0.91176971  0.99465964 0.000000e+00
predict(GPmodel, xvec)
#> $Y_hat
#>  [1] -0.9712114 -1.1152181 -1.5721000 -1.6834105  0.0887076  0.1386069
#>  [7] -0.4813026 -0.2520557  0.8776572  0.5857825
#> 
#> $MSE
#>  [1] 0.25550947 0.19513657 0.06158031 0.01946331 0.02099483 0.01392143
#>  [7] 0.40041210 0.05903336 0.02221234 0.49104954
#> 
#> $complete_data
#>          xnew.1      Y_hat        MSE
#>  [1,] 0.0000000 -0.9712114 0.25550947
#>  [2,] 0.1111111 -1.1152181 0.19513657
#>  [3,] 0.2222222 -1.5721000 0.06158031
#>  [4,] 0.3333333 -1.6834105 0.01946331
#>  [5,] 0.4444444  0.0887076 0.02099483
#>  [6,] 0.5555556  0.1386069 0.01392143
#>  [7,] 0.6666667 -0.4813026 0.40041210
#>  [8,] 0.7777778 -0.2520557 0.05903336
#>  [9,] 0.8888889  0.8776572 0.02221234
#> [10,] 1.0000000  0.5857825 0.49104954
#> 

## 2D Example: GoldPrice Function
# 2D test function: the Goldstein-Price surface, with each input
# column rescaled from [0, 1] onto the standard domain [-2, 2].
# `x` is an n x 2 matrix (or data frame); returns a length-n vector.
computer_simulator <- function(x) {
    u <- 4 * x[, 1] - 2
    v <- 4 * x[, 2] - 2
    part1 <- 1 + (u + v + 1)^2 *
        (19 - 14 * u + 3 * u^2 - 14 * v + 6 * u * v + 3 * v^2)
    part2 <- 30 + (2 * u - 3 * v)^2 *
        (18 - 32 * u + 12 * u^2 + 48 * v - 36 * u * v + 27 * v^2)
    part1 * part2
}
n <- 10
d <- 2
set.seed(1)
library(lhs)
x <- maximinLHS(n,d) 
y <- computer_simulator(x)
GPmodel <- GP_fit(x,y)
#> Warning: NaNs produced
#> Error in GP_deviance(beta = row, X = X, Y = Y, nug_thres = nug_thres,     corr = corr): Infinite values of the Deviance Function, 
#>             unable to find optimum parameters 
# fitted values
head(fitted(GPmodel))
#>          xnew.1       Y_hat          MSE
#> [1,] 0.80738197  0.01850420 0.000000e+00
#> [2,] 0.30365073 -1.90556151 0.000000e+00
#> [3,] 0.46674581  0.29880194 7.623608e-16
#> [4,] 0.05515916 -1.10127666 0.000000e+00
#> [5,] 0.57334148  0.01111711 5.717706e-16
#> [6,] 0.91176971  0.99465964 0.000000e+00
# new data
xvector <- seq(from = 0,to = 1, length.out = 10)
xdf <- expand.grid(x = xvector, y = xvector)
predict(GPmodel, xdf)
#> Error in predict.GP(GPmodel, xdf): The training and prediction data sets are of 
#>         different dimensions.