The source code of calibrate (calibrate.default) in rms 6.2-0:

function (fit, predy, method = c("boot", "crossvalidation", ".632", "randomization"),
          B = 40, bw = FALSE, rule = c("aic", "p"),
          type = c("residual", "individual"), sls = 0.05, aics = 0,
          force = NULL, estimates = TRUE, pr = FALSE, kint,
          smoother = "lowess", digits = NULL, ...)
{
    call   <- match.call()
    method <- match.arg(method)
    rule   <- match.arg(rule)
    type   <- match.arg(type)

    ## Default intercept to use: the middle intercept of the model
    ns <- num.intercepts(fit)
    if (missing(kint)) kint <- floor((ns + 1)/2)

    clas  <- attr(fit, "class")
    model <- if (any(clas == "lrm")) "lr"
             else if (any(clas == "ols")) "ol"
             else stop("fit must be from lrm or ols")

    lev.name  <- NULL
    yvar.name <- as.character(formula(fit))[2]
    y <- fit$y
    n <- length(y)
    if (length(y) == 0) stop("fit did not use x=TRUE,y=TRUE")

    if (model == "lr") {
        y        <- factor(y)
        lev.name <- levels(y)[kint + 1]
        fit$y    <- as.integer(y) - 1
    }

    ## Predicted values: probabilities for lrm (shifted to the kint-th
    ## intercept), linear predictor for ols
    predicted <- if (model == "lr")
        plogis(fit$linear.predictors - fit$coefficients[1] + fit$coefficients[kint])
    else fit$linear.predictors

    ## Default grid of predicted values at which calibration is evaluated
    if (missing(predy)) {
        if (n < 11) stop("must have n > 10 if do not specify predy")
        p     <- sort(predicted)
        predy <- seq(p[5], p[n - 4], length = 50)
        p     <- NULL
    }

    penalty.matrix <- fit$penalty.matrix

    ## Calibration error: smoothed observed vs. predicted, evaluated at predy,
    ## minus the ideal 45-degree line; the apparent curve is kept on iteration 0
    cal.error <- function(x, y, iter, smoother, predy, kint, model,
                          digits = NULL, ...) {
        if (model == "lr") {
            x <- plogis(x)
            y <- y >= kint
        }
        if (length(digits)) x <- round(x, digits)
        smo <- if (is.function(smoother)) smoother(x, y)
               else lowess(x, y, iter = 0)
        cal <- approx(smo, xout = predy, ties = function(x) x[1])$y
        if (iter == 0)
            structure(cal - predy, keepinfo = list(orig.cal = cal))
        else cal - predy
    }

    ## Refitting function used on each resample; restricts the penalty matrix
    ## to the columns present in x (xcol) when variables have been dropped
    fitit <- function(x, y, model, penalty.matrix = NULL, xcol = NULL, ...) {
        if (length(penalty.matrix) && length(xcol)) {
            if (model == "ol") xcol <- xcol[-1] - 1
            penalty.matrix <- penalty.matrix[xcol, xcol, drop = FALSE]
        }
        f <- switch(model,
                    lr = lrm.fit(x, y, penalty.matrix = penalty.matrix, tol = 1e-13),
                    ol = if (length(penalty.matrix) == 0) {
                        w <- lm.fit.qr.bare(x, y, intercept = TRUE, xpxi = TRUE)
                        w$var <- w$xpxi * sum(w$residuals^2) /
                            (length(y) - length(w$coefficients))
                        w
                    } else lm.pfit(x, y, penalty.matrix = penalty.matrix))
        if (any(is.na(f$coefficients))) f$fail <- TRUE
        f
    }

    ## Estimate the optimism of the calibration error by resampling
    z <- predab.resample(fit, method = method, fit = fitit, measure = cal.error,
                         pr = pr, B = B, bw = bw, rule = rule, type = type,
                         sls = sls, aics = aics, force = force,
                         estimates = estimates,
                         non.slopes.in.x = model == "ol",
                         smoother = smoother, predy = predy, model = model,
                         kint = kint, penalty.matrix = penalty.matrix, ...)

    ## Combine the grid, the apparent calibration, and the
    ## optimism-corrected calibration into the returned matrix
    orig.cal <- attr(z, "keepinfo")$orig.cal
    z <- cbind(predy,
               calibrated.orig      = orig.cal,
               calibrated.corrected = orig.cal - z[, "optimism"],
               z)
    structure(z, class = "calibrate.default", call = call, kint = kint,
              model = model, lev.name = lev.name, yvar.name = yvar.name,
              n = n, freq = fit$freq, non.slopes = ns, B = B, method = method,
              predicted = predicted, smoother = smoother)
}
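For context, a minimal usage sketch (not part of the source above; the object names d, f, cal and the simulated data are made up for illustration). It shows why the stop("fit did not use x=TRUE,y=TRUE") branch matters: calibrate() can only resample when the design matrix and response are stored in the fit.

library(rms)

set.seed(1)
d <- data.frame(x1 = rnorm(200), x2 = rnorm(200))
d$y <- rbinom(200, 1, plogis(-1 + d$x1 + 0.5 * d$x2))

## x=TRUE, y=TRUE keeps the design matrix and response in the fit object,
## which calibrate() requires for resampling
f <- lrm(y ~ x1 + x2, data = d, x = TRUE, y = TRUE)

## Bootstrap (method = "boot") overfitting-corrected calibration curve
cal <- calibrate(f, method = "boot", B = 200)
plot(cal)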