OP: 956487315

[Q&A] Asking again for help converting R code to MATLAB


Posted by 956487315 on 2017-03-08 10:15:56

(Code 1) lib.r — sourced by Code 2 below

require(Matrix)
require(tseries)

#self-paced function: hard scheme
eval_f_hard <- function(v, loss, lambda) {
        obj = v %*% loss - lambda*sum(v)
        return(obj)
}

#self-paced gradient: hard scheme
grad_f_hard <- function(v, loss, lambda) {
        grads = loss - lambda
        return(grads)
}

#self-paced function: linear scheme
eval_f_linear <- function(v, loss, lambda) {
        obj = v %*% loss + 1/2*lambda*sum(v^2) - lambda*sum(v)
        return(obj)
}

#self-paced gradient: linear scheme
grad_f_linear <- function(v, loss, lambda) {
        grads = loss + lambda * v - lambda
        return(grads)
}

#self-paced function: log scheme
eval_f_log <- function(v, loss, lambda) {
        zeta = 1-lambda
        obj = v %*% loss + zeta*sum(v) - sum(zeta^v)/log(zeta)
        return(obj)
}

#self-paced gradient: log scheme
grad_f_log <- function(v, loss, lambda) {
        zeta = 1-lambda
        grads = loss + zeta - zeta^v
        return(grads)
}

#self-paced function: mixture scheme
eval_f_mixture <- function(v, loss, lambda, lambda2 = 0.15) {
        #lambda here represents lambda1 in paper (lambda > lambda2 > 0)
        zeta = lambda*lambda2/(lambda-lambda2)
        obj = v %*% loss - zeta * sum(log(v+zeta/lambda))
        return(obj)
}

#self-paced gradient: mixture scheme
grad_f_mixture <- function(v, loss, lambda, lambda2 = 0.15) {
        #lambda here represents lambda1 in paper (lambda > lambda2 > 0)
        zeta = lambda*lambda2/(lambda-lambda2)
        grads = loss - zeta*lambda/(v*lambda+zeta)
        return(grads)
}

#closed-form solution: hard scheme
closedform_hard <- function(loss, lambda) {
        v = as.numeric(loss < lambda)        #weight 1 when loss < lambda, else 0
        return(v)
}

#closed-form solution: linear scheme
closedform_linear <- function(loss, lambda) {
        v = -1/lambda*loss + 1
        v[which(loss>=lambda)] = 0
        return(v)
}

#closed-form solution: log scheme
closedform_log <- function(loss, lambda) {
        zeta = 1-lambda
        v = 1/log(zeta)*log(loss+zeta)
        v[which(loss>=lambda)] = 0
        return(v)
}

#closed-form solution: mixture scheme
closedform_mixture <- function(loss, lambda, lambda2) {
        #lambda here represents lambda1 in paper (lambda > lambda2 > 0)
        zeta = (lambda*lambda2)/(lambda-lambda2)
        v = (lambda-loss)*zeta/(lambda*loss)
        v[which(loss>=lambda)] = 0
        v[which(loss<=lambda2)] = 1
        return(v)
}


#Obtain the curriculum (ordered by loss)
getcurriculum_hard <- function(solution, loss) {
        solution[which(solution>0.9)] = 1        #calibrate the solution to 0/1 (the optimizer may leave small rounding errors)
        solution[which(solution<0.1)] = 0

        #rank the selected samples (v=1) first, then the rest, each group by increasing loss
        idx1 = which(solution==1)
        idx1 = idx1[sort(loss[idx1],index.return=TRUE)$ix]
        idx0 = which(solution==0)
        idx0 = idx0[sort(loss[idx0],index.return=TRUE)$ix]

        return(c(idx1,idx0))
}
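Since the thread is asking for a MATLAB port, here is a minimal MATLAB sketch of the core of lib.r (hard, linear, and log schemes; the mixture scheme follows the same pattern). The handle names are my own, not from any existing MATLAB package; please check the formulas against the R originals before relying on them.

% MATLAB sketch of lib.r: self-paced objectives, gradients, and closed forms
% (anonymous function handles; no toolbox required for this part)

% hard scheme
eval_f_hard = @(v, loss, lambda) loss(:)' * v(:) - lambda * sum(v);
grad_f_hard = @(v, loss, lambda) loss(:) - lambda;

% linear scheme
eval_f_linear = @(v, loss, lambda) loss(:)' * v(:) + 0.5*lambda*sum(v.^2) - lambda*sum(v);
grad_f_linear = @(v, loss, lambda) loss(:) + lambda*v(:) - lambda;

% log scheme (zeta = 1 - lambda, requires 0 < lambda < 1)
eval_f_log = @(v, loss, lambda) loss(:)' * v(:) + (1-lambda)*sum(v) - sum((1-lambda).^v)/log(1-lambda);
grad_f_log = @(v, loss, lambda) loss(:) + (1-lambda) - (1-lambda).^v(:);

% closed-form solutions (vectorized; samples with loss >= lambda get weight 0)
closed_hard   = @(loss, lambda) double(loss < lambda);
closed_linear = @(loss, lambda) max(1 - loss/lambda, 0);
closed_log    = @(loss, lambda) (loss < lambda) .* (log(loss + 1 - lambda) / log(1 - lambda));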
(Code 2) the demo script

source("lib.r")

print("##############################################")
print("This script lists the implementation of SPCL, and a toy example in our paper.")
print("##############################################")


######################################
######## start of the script #########
######################################

#0) load data
id = letters[1:6]
myloss = c(0.1,0.2,0.4,0.6,0.5,0.3)
print("####### input loss ######")
print(myloss)


#1) optimize v without curriculum constraints
{
        v0 = replicate(length(id),0)                #initial values
        tolerance = 10^-7                                        #a small constant for optimization accuracy
        lambda = 0.83333                                        #parameter in self-paced learning                                                       
       
        u2 = diag(replicate(length(v0),1))                #v >= 0 - 10^-7
        u3 = -1*diag(replicate(length(v0),1))        #-v >= -1 - 10^-7 i.e. v <= 1 + 10^-7
        ui = rbind(u2,u3)
       
        c2 = replicate(length(v0),-1*tolerance)         #v >= 0 - 10^-7
        c3 = -1*replicate(length(v0),1+tolerance) #-v >= -1 - 10^-7 i.e. v <= 1 + 10^-7
        ci = c(c2,c3)
       
       
        stopifnot(all(ui %*% v0 - ci >= 0))                #check the feasibility of the initial values v0

        #hard scheme
        solution1 = constrOptim(theta = v0,
                  f = eval_f_hard,
                  grad = grad_f_hard,
                  ui = ui,
                  ci = ci,
                  loss = myloss, lambda=lambda)$par
                  
        #log scheme
        solution2 = constrOptim(theta = v0,
                  f = eval_f_log,
                  grad = grad_f_log,
                  ui = ui,
                  ci = ci,
                  loss = myloss, lambda=lambda)$par
       
        print("##############################################")
        print("--hard scheme w/o curriculum Constraint")
        print(solution1)
        print("--log scheme w/o curriculum Constraint")
        print(solution2)
       
       
       
        #compare with the closed-form solution for unconstrained problems
        closed_solution1 = closedform_hard(myloss,lambda)
        closed_solution2 = closedform_log(myloss,lambda)
       
        print("##############################################")
        print(paste("--linear scheme MSE with closed-form solution", sum((solution1-closed_solution1)^2)))
        print(paste("--log scheme MSE with closed-form solution", sum((solution2-closed_solution2)^2)))
       
        print("##############################################")
        #print the curriculum (ordered by loss)
        print("SPL curriculum (ordered by loss)")
        print(id[getcurriculum_hard(solution1, myloss)])
}
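For the MATLAB side, this step maps naturally onto fmincon from the Optimization Toolbox (since the hard-scheme objective is linear in v, linprog would work too). A sketch reusing the handles defined above; note that fmincon takes the bounds 0 <= v <= 1 directly, so the hand-built ui/ci rows and the tolerance bookkeeping disappear:

% Step 1 in MATLAB: optimize v without curriculum constraints
myloss = [0.1; 0.2; 0.4; 0.6; 0.5; 0.3];
lambda = 0.83333;
n      = numel(myloss);
v0     = zeros(n, 1);                            % feasible initial values

opts = optimoptions('fmincon', 'Display', 'off');
obj  = @(v) eval_f_hard(v, myloss, lambda);      % gradient omitted; fmincon estimates it

% no linear constraints here, only the box bounds 0 <= v <= 1
solution1 = fmincon(obj, v0, [], [], [], [], zeros(n,1), ones(n,1), [], opts);

% sanity check against the closed-form solution
fprintf('hard scheme MSE vs closed form: %g\n', ...
        sum((solution1 - closed_hard(myloss, lambda)).^2));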



#2) calculate curriculum constraints
A = matrix(0, nrow=length(id), ncol=1)
A[,1] = c(0.1, 0.0, 0.4, 0.3, 0.5, 1.0)        #curriculum constraints matrix
c = c(1)                                        #curriculum constraints vector

print("####### A matrix ######")
print(A)


#3) optimize v with the curriculum constraint (A)
{
        v0 = replicate(length(id),0)                #initial values
        tolerance = 10^-7                        #a small constant for optimization accuracy
        lambda = 0.83333                        #parameter in self-paced learning

        u1 = -1*t(A)                                                                #-Av >= -c        i.e.        Av <= c
        u2 = diag(replicate(length(v0),1))                        #v >= 0 - 10^-7
        u3 = -1*diag(replicate(length(v0),1))                #-v >= -1 - 10^-7 i.e. v <= 1 + 10^-7
        ui = rbind(u1,u2,u3)                                               
       
       
        c1 = -1*c - tolerance                                                #-Av >= -c        i.e.        Av <= c
        c2 = replicate(length(v0),-1*tolerance)                 #v >= 0 - 10^-7
        c3 = -1*replicate(length(v0),1+tolerance)         #-v >= -1 - 10^-7 i.e. v <= 1 + 10^-7
        ci = c(c1,c2,c3)                               
       
        #check the feasibility of initial values
        stopifnot(all(ui %*% v0 - ci >= 0))
       
        solution3 = constrOptim(theta = v0,
                f = eval_f_hard,
                grad = grad_f_hard,
                ui = ui,
                ci = ci,
                loss = myloss, lambda=lambda)$par

        #ui %*% solution3 - ci >= 0
       
        print("--hard scheme w/ curriculum Constraint")
        print(solution3)
        print("SPCL curriculum:")
        print(id[sort(solution3,index.return=TRUE, decreasing=TRUE)$ix])
}
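The constrained step is where MATLAB gets simpler than the R version: fmincon accepts the inequality A*v <= c directly through its A/b arguments. A sketch, reusing obj, v0, n, and opts from the previous snippet:

% Step 3 in MATLAB: optimize v under the curriculum constraint A*v <= c
A = [0.1, 0.0, 0.4, 0.3, 0.5, 1.0];              % 1-by-n curriculum constraint row
b = 1;                                           % the constraint vector c in the R code

solution3 = fmincon(obj, v0, A, b, [], [], zeros(n,1), ones(n,1), [], opts);

% SPCL curriculum: sample ids ordered by decreasing weight
id = {'a','b','c','d','e','f'};
[~, ix] = sort(solution3, 'descend');
disp(id(ix));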


######################################
########   Rank Correlation  #########
######################################
gt = c("a","b","c","d","e","f")
cl = c("b","a","d","c","e","f")
spl = id[getcurriculum_hard(solution1, myloss)]
spld = id[sort(solution3,index.return=TRUE, decreasing=TRUE)$ix]

gtrank = unlist(lapply(gt, function(x){which(gt==x)}))
clrank = unlist(lapply(cl, function(x){which(gt==x)}))
splrank = unlist(lapply(spl, function(x){which(gt==x)}))
spldrank = unlist(lapply(spld, function(x){which(gt==x)}))

print(paste("CL rank correlation =", cor(cbind(clrank,gtrank), method="kendall", use="pairwise")[1,2]))
print(paste("SPL rank correlation =", cor(cbind(splrank,gtrank), method="kendall", use="pairwise")[1,2]))
print(paste("SPCL rank correlation =", cor(cbind(spldrank,gtrank), method="kendall", use="pairwise")[1,2]))


