Dear all,

Name: Adeilton Pedro de Alcântara

I am writing to ask for help.

Problem definition: I have a surface (a mixture of five bivariate normal distributions), lambda_23(x,y), and a straight line, lambda_1(t). Taking the line and the surface as known (the true distributions), my goal is to recover them by maximizing the log-likelihood equation (in the program I am sending, it sits inside the function metricaalg1). In other words, I want to recover the structure of the polynomial that generated a given data set using a log-likelihood equation, without having to write a parametric model.

My difficulty is with the nlminb() command in R, used inside the function metricaalg1. I cannot get these function maximization/minimization commands to behave. When I change the initial guess the optimizer still converges, but the values it returns are completely different from the ones I want to find; that is, depending on the initial guess I get completely different coefficients. I do not know whether the problem is in the control list or in the initial guess itself; probably neither. The program that generates the NHPP (non-homogeneous Poisson process) is working. The difficulty is in the use of nlminb(). I have already tried nlm(), optim(), and DEoptim() to maximize the log-likelihood equation, but I cannot see what is wrong. The problem is probably multiple maxima in the likelihood, since I have many coefficients. I have been stuck on this for four years.

I am sending code in the R language. The goal is to recover a line (lambda_1(t)) and a mixture of 5 bivariate normal distributions (lambda_23(x,y)). My difficulty is with the initial guess: the results depend heavily on it, and every time I modify it I get a completely different answer. Please look at the function where I generate the initial guess (CHUTEINICIALalg1) and the function metricaalg1 (where the likelihood is). In that function I maximize the log-likelihood equation numerically with the R command nlminb(). Where it says CC<-(as.single(CC)>0) # I made it positive so it would not run for too long — you may leave it as CC<-(as.single(CC)). To run the code, just follow the commands starting from the header "# To run the function, paste the commands below into R #". The file with the R routines is attached to this e-mail.

Please help me.
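For reference, this is the kind of multistart strategy I have in mind for dealing with the multiple maxima, shown only as a minimal sketch on a made-up toy objective (negloglik_toy, the box [-5, 5], and the 10 starts below are placeholders, not my actual fn1 or its dimension):

# Minimal multistart sketch (toy objective, not the real fn1):
# run nlminb from several random starting points and keep the run
# with the smallest (negative log-) likelihood value.
negloglik_toy <- function(p) {
  (p[1]^2 - 4)^2 + (p[2] - 1)^2   # two equally good minima, at p = c(-2, 1) and c(2, 1)
}
set.seed(123)
starts <- lapply(1:10, function(i) runif(2, min = -5, max = 5))
fits   <- lapply(starts, function(s)
  nlminb(start = s, objective = negloglik_toy, lower = c(-10, -10), upper = c(10, 10)))
best <- fits[[which.min(sapply(fits, `[[`, "objective"))]]
best$par          # parameters of the best run
best$objective    # smallest objective value found
best$convergence  # 0 means successful convergence

Different starting points end up in different minima, but comparing the returned $objective values at least shows which run is best.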
Best regards,
Adeilton Pedro de Alcântara

###############################################################################
library(splines)
library(mgcv)
library(fda)

###############################################################################
# Start of the program that generates the mixture of 5 bivariate normal      #
# distributions                                                              #
###############################################################################
dnormm <- function(x, m, P)
{
  exp(-0.5*(t(x - m) %*% P %*% (x - m)))
}

dmixtnormm <- function(x, m1, m2, m3, m4, m5, P1, P2, P3, P4, P5, dP1, dP2, dP3, dP4, dP5)
{
  ptrue <- c(0.5, 0.5, 0.5, 0.2, 0.1)
  ptrue <- ptrue/sum(ptrue)
  ptrue[1]*dP1*exp(-0.5*(t(x - m1) %*% P1 %*% (x - m1))) +
  ptrue[2]*dP2*exp(-0.5*(t(x - m2) %*% P2 %*% (x - m2))) +
  ptrue[3]*dP3*exp(-0.5*(t(x - m3) %*% P3 %*% (x - m3))) +
  ptrue[4]*dP4*exp(-0.5*(t(x - m4) %*% P4 %*% (x - m4))) +
  ptrue[5]*dP5*exp(-0.5*(t(x - m5) %*% P5 %*% (x - m5)))
}

#####################################################################
funclambda23 <- function(n)
{
  m1 <- c(1, 1); m2 <- c(6, 6); m3 <- c(1, 10); m4 <- c(7, 1); m5 <- c(1.5, 5)
  S1 <- matrix(c(2, 1.4, 1.4, 2), 2, 2);         iS1 <- solve(S1); iS1 <- (iS1 + t(iS1))/2
  S2 <- matrix(c(1, 0.1, 0.1, 1), 2, 2);         iS2 <- solve(S2); iS2 <- (iS2 + t(iS2))/2
  S3 <- matrix(c(1, 0.7, 0.7, 2), 2, 2);         iS3 <- solve(S3); iS3 <- (iS3 + t(iS3))/2
  S4 <- matrix(c(1.5, -0.25, -0.25, 1.5), 2, 2); iS4 <- solve(S4); iS4 <- (iS4 + t(iS4))/2
  S5 <- matrix(c(0.8, 0, 0, 0.5), 2, 2);         iS5 <- solve(S5); iS5 <- (iS5 + t(iS5))/2
  # square root of the determinant of each precision matrix (via Cholesky)
  dP1 <- prod(diag(chol(iS1)))
  dP2 <- prod(diag(chol(iS2)))
  dP3 <- prod(diag(chol(iS3)))
  dP4 <- prod(diag(chol(iS4)))
  dP5 <- prod(diag(chol(iS5)))
  # True distribution
  x <- seq(-4, 10, length.out = n)
  y <- seq(-5, 15, length.out = n)
  fnorm <- matrix(0, length(x), length(y))
  for (i in 1:length(x)) {
    for (j in 1:length(y)) {
      fnorm[i, j] <- dmixtnormm(c(x[i], y[j]), m1, m2, m3, m4, m5,
                                iS1, iS2, iS3, iS4, iS5, dP1, dP2, dP3, dP4, dP5)
    }
  }
  lambda23 <- fnorm
  list(lambda23 = lambda23, x = x, y = y)
}
#############################################################
# End of the program that generates the mixture of normals, #
# i.e. the distribution lambda_{23}(x,y)                    #
#############################################################

############################################################
# Start of the program that generates lambda_{1}(t)        #
############################################################
funclambda1 <- function(n)
{
  t <- seq(0, 1, length = n)
  lambda1 <- (1 + 3*t)
  list(lambda1 = lambda1, t = t)
}
#########################################################
# End of the program that generates lambda_{1}(t)       #
#########################################################
#lambda1 <- funclambda1(n)$lambda1
#t <- funclambda1(n)$t
#plot(t, lambda1, xlab = "t", ylab = " ", type = "l", lty = 1,
#     main = expression(lambda[1](t)), col = "green", cex = 0.7, lwd = 7, bty = "n")

#####################################
# Start of the integration routines #
#####################################
MC.integrate.3D <- function(fct, low = c(0, -4, -5), upp = c(1, 10, 15), npoints = 100, exact.value = NULL)
{
  points.t <- runif(n = npoints, min = low[1], max = upp[1])
  points.x <- runif(n = npoints, min = low[2], max = upp[2])
  points.y <- runif(n = npoints, min = low[3], max = upp[3])
  approx.tmp <- fct(points.t, points.x, points.y)
  V.tmp <- diff(c(low[1], upp[1]))*diff(c(low[2], upp[2]))*diff(c(low[3], upp[3]))
  approx <- mean(approx.tmp)*V.tmp
  Varapprox <- var(approx.tmp)*V.tmp
  if (!is.null(exact.value)) {
    cat("To achieve an error of 0.0001 you need at least",
        floor(Varapprox^2/0.0001^2), "points.\n")
  }
  list(approx = approx)
}

MC.integrate.2D <- function(fct, low = c(-4, -5), upp = c(10, 15), npoints = 150, exact.value = NULL)
{
  points.x <- runif(n = npoints, min = low[1], max = upp[1])
  points.y <- runif(n = npoints, min = low[2], max = upp[2])
  approx.tmp <- fct(points.x, points.y)
  V.tmp <- diff(c(low[1], upp[1]))*diff(c(low[2], upp[2]))
  approx <- mean(approx.tmp)*V.tmp
  Varapprox <- var(approx.tmp)*V.tmp
  if (!is.null(exact.value)) {
    cat("To achieve an error of 0.0001 you need at least",
        floor(Varapprox^2/0.0001^2), "points.\n")
  }
  list(approx = approx)
}

MC.integrate.1D <- function(fct, low = 0, upp = 1, npoints = 150, exact.value = NULL)
{
  points <- runif(n = npoints, min = low, max = upp)
  approx.tmp <- apply(as.matrix(points), 2, fct)
  V.tmp <- diff(c(low, upp))
  approx <- mean(approx.tmp)*V.tmp
  Varapprox <- var(approx.tmp)*V.tmp
  if (!is.null(exact.value)) {
    cat("To achieve an error of 0.0001 you need at least",
        floor(Varapprox^2/0.0001^2), "points.\n")
  }
  list(approx = approx)
}
###################################
# End of the integration routines #
###################################

###############################################################
# Start of the program that generates the artificial data set #
###############################################################
geragrid <- function(j)
{
  integrand4 <- function(t) { (1 + 3*t) }
  int4 <- integrate(integrand4, lower = 0, upper = 1)

  #########################################################
  #### Mixture of five bivariate normal distributions  ####
  #########################################################
  # Normal 1
  mu1 <- 1      # expected value of x1
  mu2 <- 1      # expected value of x2
  s11 <- 2      # variance of x1
  s12 <- 1.4    # covariance between x1 and x2
  s22 <- 2      # variance of x2
  rho <- 0.7    # correlation between x1 and x2
  # bivariate normal density
  f <- function(x1, x2) {
    term1 <- 1/(2*pi*sqrt(s11*s22*(1 - rho^2)))
    term2 <- -1/(2*(1 - rho^2))
    term3 <- (x1 - mu1)^2/s11
    term4 <- (x2 - mu2)^2/s22
    term5 <- -2*rho*((x1 - mu1)*(x2 - mu2))/(sqrt(s11)*sqrt(s22))
    term1*exp(term2*(term3 + term4 - term5))
  }
  f11 <- MC.integrate.2D(f, low = c(-4, -5), upp = c(10, 15), npoints = 100, exact.value = NULL)$approx

  # Normal 2
  mu1 <- 6; mu2 <- 6        # expected values of x1 and x2
  s11 <- 1; s22 <- 1        # variances of x1 and x2
  s12 <- 0.1; rho <- 0.1    # covariance and correlation between x1 and x2
  f <- function(x1, x2) {
    term1 <- 1/(2*pi*sqrt(s11*s22*(1 - rho^2)))
    term2 <- -1/(2*(1 - rho^2))
    term3 <- (x1 - mu1)^2/s11
    term4 <- (x2 - mu2)^2/s22
    term5 <- -2*rho*((x1 - mu1)*(x2 - mu2))/(sqrt(s11)*sqrt(s22))
    term1*exp(term2*(term3 + term4 - term5))
  }
  f22 <- MC.integrate.2D(f, low = c(-4, -5), upp = c(10, 15), npoints = 100, exact.value = NULL)$approx

  # Normal 3
  mu1 <- 1; mu2 <- 10         # expected values of x1 and x2
  s11 <- 1; s22 <- 1          # variances of x1 and x2
  s12 <- -0.7; rho <- -0.7    # covariance and correlation between x1 and x2
  f <- function(x1, x2) {
    term1 <- 1/(2*pi*sqrt(s11*s22*(1 - rho^2)))
    term2 <- -1/(2*(1 - rho^2))
    term3 <- (x1 - mu1)^2/s11
    term4 <- (x2 - mu2)^2/s22
    term5 <- -2*rho*((x1 - mu1)*(x2 - mu2))/(sqrt(s11)*sqrt(s22))
    term1*exp(term2*(term3 + term4 - term5))
  }
  f3 <- MC.integrate.2D(f, low = c(-4, -5), upp = c(10, 15), npoints = 100, exact.value = NULL)$approx

  # Normal 4
  mu1 <- 7; mu2 <- 1                # expected values of x1 and x2
  s11 <- 1.5; s22 <- 1.5            # variances of x1 and x2
  s12 <- -0.2; rho <- -0.1333333    # covariance and correlation between x1 and x2
  f <- function(x1, x2) {
    term1 <- 1/(2*pi*sqrt(s11*s22*(1 - rho^2)))
    term2 <- -1/(2*(1 - rho^2))
    term3 <- (x1 - mu1)^2/s11
    term4 <- (x2 - mu2)^2/s22
    term5 <- -2*rho*((x1 - mu1)*(x2 - mu2))/(sqrt(s11)*sqrt(s22))
    term1*exp(term2*(term3 + term4 - term5))
  }
  f4 <- MC.integrate.2D(f, low = c(-4, -5), upp = c(10, 15), npoints = 100, exact.value = NULL)$approx

  # Normal 5
  mu1 <- 1.5; mu2 <- 5      # expected values of x1 and x2
  s11 <- 0.8; s22 <- 0.5    # variances of x1 and x2
  s12 <- 0; rho <- 0        # covariance and correlation between x1 and x2
  f <- function(x1, x2) {
    term1 <- 1/(2*pi*sqrt(s11*s22*(1 - rho^2)))
    term2 <- -1/(2*(1 - rho^2))
    term3 <- (x1 - mu1)^2/s11
    term4 <- (x2 - mu2)^2/s22
    term5 <- -2*rho*((x1 - mu1)*(x2 - mu2))/(sqrt(s11)*sqrt(s22))
    term1*exp(term2*(term3 + term4 - term5))
  }
  f5 <- MC.integrate.2D(f, low = c(-4, -5), upp = c(10, 15), npoints = 100, exact.value = NULL)$approx

  V <- (0.28*f11 + 0.28*f22 + 0.28*f3 + 0.11*f4 + 0.05*f5)*int4$value

  a1 <- 1;  a2 <- 10;  a3 <- 15
  b1 <- 0;  b2 <- -4;  b3 <- -5
  apl1 <- (a1 - b1); apl2 <- (a2 - b2); apl3 <- (a3 - b3)

  m1 <- c(1, 1); m2 <- c(6, 6); m3 <- c(1, 10); m4 <- c(7, 1); m5 <- c(1.5, 5)
  S1 <- matrix(c(2, 1.4, 1.4, 2), 2, 2);         iS1 <- solve(S1); iS1 <- (iS1 + t(iS1))/2
  S2 <- matrix(c(1, 0.1, 0.1, 1), 2, 2);         iS2 <- solve(S2); iS2 <- (iS2 + t(iS2))/2
  S3 <- matrix(c(1, 0.7, 0.7, 2), 2, 2);         iS3 <- solve(S3); iS3 <- (iS3 + t(iS3))/2
  S4 <- matrix(c(1.5, -0.25, -0.25, 1.5), 2, 2); iS4 <- solve(S4); iS4 <- (iS4 + t(iS4))/2
  S5 <- matrix(c(0.8, 0, 0, 0.5), 2, 2);         iS5 <- solve(S5); iS5 <- (iS5 + t(iS5))/2
  # square root of the determinant of each precision matrix (via Cholesky)
  dP1 <- prod(diag(chol(iS1)))
  dP2 <- prod(diag(chol(iS2)))
  dP3 <- prod(diag(chol(iS3)))
  dP4 <- prod(diag(chol(iS4)))
  dP5 <- prod(diag(chol(iS5)))

  lambdabarra <- 4/(2*pi*((dP1)^0.5))
  N <- rpois(1, lambdabarra*V)
  n <- N
  t <- array(NA, dim = n, dimnames = NULL)
  x <- array(NA, dim = n, dimnames = NULL)
  y <- array(NA, dim = n, dimnames = NULL)
  U <- array(NA, dim = n, dimnames = NULL)
  RAZAO  <- array(NA, dim = n, dimnames = NULL)
  lambda <- array(NA, dim = n, dimnames = NULL)
  for (i in 1:n)
  {
    RESPOSTA <- TRUE
    while (RESPOSTA)
    {
      tt <- runif(1, min = b1, max = a1)
      xx <- runif(1, min = b2, max = a2)
      yy <- runif(1, min = b3, max = a3)
      U  <- runif(1, min = 0, max = 1)
      lambdabarra <- 4*(2*pi*((dP1)^0.5))
      lambda <- (1 + 3*tt)*dmixtnormm(c(xx, yy), m1, m2, m3, m4, m5,
                                      iS1, iS2, iS3, iS4, iS5, dP1, dP2, dP3, dP4, dP5)
      RAZAOESTRELA <- (lambda/lambdabarra)
      if(U
# [the pasted code is cut off at this point: the acceptance step of the thinning loop, the end of
#  geragrid(), and the functions composition(), bsnorm1(), bsnorm2(), bsnorm3(), CHUTEINICIALalg1()
#  and the header of metricaalg1()/fn1() are only in the attached file]

CC <- (as.single(CC) > 0)   # I made this positive so it would not run for too long, but I am not sure exactly what it does
#mlec1 <- ((A1%*%CC) - mean(A1%*%CC))/sqrt((A1%*%CC) > 0)
#mlec2 <- ((A2%*%CC) - mean(A2%*%CC))/sqrt((A2%*%CC) > 0)
#CC <- c(mlec1, mlec2)
n1 <- length(t)
n2 <- length(x)
n3 <- length(y)
n  <- n3
rng1 <- c(min(tt), max(tt))
k1 <- ndt + 4
absbasis1 <- create.bspline.basis(rng1, k1)
B1 <- getbasismatrix(t, basisobj = absbasis1, nderiv = 0)
Quad1 <- quadset(basis = absbasis1, breaks = rng1)
quadpoints1 <- (Quad1$quadvals)[,1]
quadweight1 <- (Quad1$quadvals)[,2]
rng2 <- c(min(x), max(x))
k2 <- ndx + 4
absbasis2 <- create.bspline.basis(rng2, k2)
B2 <- getbasismatrix(x, basisobj = absbasis2, nderiv = 0)
Quad2 <- quadset(basis = absbasis2, breaks = rng2)
quadpoints2 <- (Quad2$quadvals)[,1]
quadweight2 <- (Quad2$quadvals)[,2]
rng3 <- c(min(y), max(y))
k3 <- ndy + 4
absbasis3 <- create.bspline.basis(rng3, k3)
B3 <- getbasismatrix(y, basisobj = absbasis3, nderiv = 0)
Quad3 <- quadset(basis = absbasis3, breaks = rng3)
quadpoints3 <- (Quad3$quadvals)[,1]
quadweight3 <- (Quad3$quadvals)[,2]
p1 <- eval.basis(quadpoints2, absbasis2)*quadweight2
p2 <- eval.basis(quadpoints3, absbasis3)*quadweight3
Integral23 <- kronecker(p1, p2)

#####################################
# Start of the likelihood function  #
#####################################
likelihood <- ( (apply(matrix((log(B%*%(A1%*%CC))) + (log(P%*%(A2%*%CC))), n1, 1), 2, sum)) -
                sum((eval.basis(quadpoints1, absbasis1)%*%(A1%*%CC))*quadweight1) *
                sum(Integral23%*%(A2%*%CC)) )
return(-likelihood)
###################################
# End of the likelihood function  #
###################################

##################################################################
# Start of the likelihood function (alternative, commented out)  #
##################################################################
#likelihood <- ( (apply(matrix(((B%*%(A1%*%CC))) + ((P%*%(A2%*%CC))), n1, 1), 2, sum))
#  - (1/length(tt))*(max(tt)-min(tt))*(1/length(xx))*(max(xx)-min(xx))*(1/length(yy))*(max(yy)-min(yy))*
#    (apply(matrix(exp(B11%*%(A1%*%CC)), ntt, 1), 2, sum))%*%
#    (apply(matrix(exp(P11%*%(A2%*%CC)), nxx, 1), 2, sum)) )
#return(-likelihood)
###################################
# End of the likelihood function  #
###################################
}

P <- PROUTOTENSORIAL(M, N)$P

#int123 <- function(t, x, y) {
#  exp( (B%*%(A1%*%CC))%*%(t( P%*%(A2%*%CC))) )
#}
#INT123 <- MC.integrate.3D(int123, low = c(tl, xl, yl), upp = c(tr, xr, yr), npoints = 1500, exact.value = NULL)
#INT123 <- INT123$approx

##################################################################
# Start of the likelihood function (alternative, commented out)  #
##################################################################
#likelihood <- (1/n)*(apply(matrix(exp( (B%*%(A1%*%CC))%*%(t( P%*%(A2%*%CC))) ), n1, 1), 2, sum) - INT123)
#return(-likelihood)
###################################
# End of the likelihood function  #
###################################
}

ing <- CHUTEINICIALalg1(ndt, ndx, ndy, t, x, y)$CC   # seed (initial guess)
CC  <- ing
out1 <- nlminb(CC, fn1, t = t, x = x, y = y, P = P,
               control = list(iter.max = 1500, eval.max = 5000, step.min = 1e-3, x.tol = 1e-1, maxit = 10000),
               fscale = length(x),
               lower = rep(0.0001, ((k2*k3) + k1)),
               upper = rep(Inf, ((k2*k3) + k1)))

#out1 <- nlm(fn1, CC, t = t, x = x, y = y, P = P, fscale = length(x))
#out1 <- nlminb(CC, fn1, t = t, x = x, y = y, OMEGA1 = OMEGA1, F = F, P = P,
#               control = list(iter.max = 1500, eval.max = 5000, step.min = 1e-3, x.tol = 1e-1, maxit = 10000),
#               fscale = length(x), lower = c(rep(0.0001, ((k2*k3) + k1))), upper = rep(Inf, ((k2*k3) + k1)))
#out1 <- nlminb(allpha, fn1, CC = CC, t = t, x = x, y = y, OMEGA1 = OMEGA1, F = F, P = P,
#               control = list(iter.max = 150, eval.max = 500, step.min = 1e-3, x.tol = 1e-1, maxit = 10000),
#               lower = c(rep(0.0001, ((k2*k3) + k1))), upper = rep(Inf, ((k2*k3) + k1)))
#out1 <- nlminb(ing, fn1, lower = c(rep(0.001, ((k2*k3) + k1))), upper = rep(Inf, ((k2*k3) + k1)))
#itermax <- ((k2*k3) + k1)
#out1 <- nlminb(ing, fn1, control = list(iter.max = itermax, step.min = 2.2e-1),
#               lower = c(min(t), min(x), min(y)), upper = c(max(t), max(x), max(y)))
#out1 <- nlminb(ing, fn1, control = list(iter.max = itermax, step.min = 2.2e-1), lower = min(t), upper = max(y))
#out1 <- optim(ing, fn1, control = list(maxit = itermax), method = "CG",
#              lower = c(min(t), min(x), min(y)), upper = c(max(t), max(x), max(y)))
#out1 <- optim(ing, fn1, control = list(maxit = 100*itermax), method = "L-BFGS-B",
#              lower = c(min(t), min(x), min(y)), upper = c(max(t), max(x), max(y)))
#out1 <- optim(ing, fn1, control = list(maxit = 100*itermax, trace = 6), method = "L-BFGS-B",
#              lower = c(min(t), min(x), min(y)), upper = c(max(t), max(x), max(y)))
#out1 <- optim(ing, fn1, control = list(maxit = 100*itermax, trace = 6), method = "Nelder-Mead",
#              lower = c(min(t), min(x), min(y)), upper = c(max(t), max(x), max(y)))
#out1 <- optim(ing, fn1, control = list(maxit = 100*itermax), method = "Nelder-Mead")
#out1 <- spg(ing, fn1, control = list(maxit = 100*itermax, maximize = TRUE))
#out1 <- optim(ing, fn1, control = list(maxit = itermax))                                     # obtain theta-hat (initial alpha)
#out1 <- optim(ing, fn1, control = list(ndeps = 1e-3, reltol = 1e-4, type = 2, maxit = 10000)) # obtain theta-hat (initial alpha)
#out1 <- optim(ing, fn1, control = list(ndeps = 1e-3, reltol = 1e-4, type = 2, maxit = 10000)) # obtain theta-hat (initial alpha)
#estimate <- out1$par
#out1 <- nlm(fn1, ing)
#estimate <- out1$estimate
#out1 <- nlminb(ing, fn1, lower = 0.0001, upper = Inf)
#out1 <- nlminb(ing, fn1, control = list(iter.max = 4))
#out1 <- optim(ing, fn1)                                                                       # obtain theta-hat (initial alpha)
#out1 <- optim(ing, fn1, control = list(maxit = 10000))                                        # obtain theta-hat (initial alpha)
#out1 <- optim(ing, fn1, control = list(ndeps = 1e-3, reltol = 1e-4, type = 2, maxit = 10000))
#out1 <- optim(ing, fn1, control = list(ndeps = 1e-3, reltol = 1e-4, type = 2, maxit = 10000))
#estimate <- out1$par
#out1 <- nlm(fn1, ing, gradtol = 1e-3, steptol = 1e-3)                                         # obtain theta-hat (initial alpha)

list(out1 = out1)
}
####################################
# End of the metricaalg1 function  #
####################################

#########################################################
# To run the function, paste the commands below into R  #
#########################################################
#m = 4
#m = 9
#m = 29
m <- 146   # number of superpositions (use this value)
s <- composition(m)$s
t <- s[1,]
x <- s[2,]
y <- s[3,]
t <- as.numeric(t)
x <- as.numeric(x)
y <- as.numeric(y)
quantidade <- s[4,1]
n <- length(y)
tt <- seq(0, 1, length = n)
xx <- seq(-4, 10, length.out = n)
yy <- seq(-5, 15, length.out = n)
lambda1 <- funclambda1(n)$lambda1
lambda1 <- matrix(lambda1, n, 1)
lambda23 <- funclambda23(n)$lambda23

###################
# number of knots #
###################
ndtalg1 <- 6
ndxalg1 <- 16
ndyalg1 <- ndxalg1

out1alg1 <- metricaalg1(ndt = ndtalg1, ndx = ndxalg1, ndy = ndyalg1, t, x, y)$out1
estimatealg1 <- out1alg1$par
#estimatealg1 <- out1alg1$estimate
out1alg1

tt <- sort(tt); xx <- sort(xx); yy <- sort(yy)
tl <- min(tt); tr <- max(tt)
xl <- min(xx); xr <- max(xx)
yl <- min(yy); yr <- max(yy)
bdeg1 <- 3; bdeg2 <- 3; bdeg3 <- 3
B11alg1 <- bsnorm1(tt, tl, tr, ndtalg1, bdeg1)$B
M11alg1 <- bsnorm2(xx, xl, xr, ndxalg1, bdeg2)$M
N11alg1 <- bsnorm3(yy, yl, yr, ndyalg1, bdeg3)$N
mlec1alg1 <- estimatealg1[1:dim(B11alg1)[2]]
mlec2alg1 <- estimatealg1[(dim(B11alg1)[2] + 1):(dim(B11alg1)[2] + dim(M11alg1)[2]*dim(N11alg1)[2])]
lambdachapeu1alg1  <- ((B11alg1%*%mlec1alg1))   #/m
lambdachapeu23alg1 <- funclambdarecupera23(t, x, y, mlec2alg1, ndtalg1, ndxalg1, ndyalg1)$lambda23estimada

# True surface:
contour(xx, yy, lambda23, col = "black", lwd = 7, labcex = 1.1, lty = "solid", method = "edge",
        vfont = c("sans serif", "bold"), drawlabels = TRUE, xlab = "x", ylab = "y",
        cex.axis = 1.7, cex.lab = 1.7, cex.sub = 1.7)
# Add darker lines of the second quantity (the estimated surface):
contour(xx, yy, lambdachapeu23alg1, add = TRUE, labcex = 1.1, lwd = 7,
        vfont = c("sans serif", "bold"), drawlabels = TRUE, xlab = "x", ylab = "y",
        cex.axis = 1.7, cex.lab = 1.7, cex.sub = 1.7, method = "edge", col = "green", lty = "solid")
# Add a light grid:
#grid(col = "gray50")
#points(x, y, cex = 2.5, col = "red")
title("Expected number of points: n = 500", cex.main = 2.7)

#########################################################################################################################
plot(tt, lambda1, xlab = "t", ylab = expression(hat(lambda)[1]), type = "n", lty = 1, col = "red",
     cex = 1.7, lwd = 7, bty = "n", cex.axis = 1.7, cex.lab = 1.7, cex.sub = 1.7,
     main = "Expected number of points: n = 500", cex.main = 2.7)
lines(tt, lambda1, type = "l", lty = 1, col = "black", cex = 1.7, lwd = 7)
lines(tt, lambdachapeu1alg1, type = "l", lty = 4, col = "green", cex = 0.7, lwd = 7)
legend(locator(1), legend = c("True curve", "Unpenalized log-likelihood"),
       col = c("black", "green"), lty = c(1, 4), merge = TRUE, lwd = 7)
#########################################################################################################################