This document describes an in-class activity where students work in teams to optimize different bivariate functions using the optim() function in R. It provides instructions for students to claim a test function, plot it, and use the Nelder-Mead and simulated annealing methods to optimize the function. Several student examples are given, including optimizing the sphere, Rosenbrock, Beale's and Goldstein-Price functions. Errors are reported for some optimizations.
Giá 10k/ 5 lượt tải liên hệ page để mua https://www.facebook.com/garmentspace
Chỉ với 10k THẺ CÀO VIETTEL bạn có ngay 5 lượt download tài liệu bất kỳ do Garment Space upload, hoặc với 100k THẺ CÀO VIETTEL bạn được truy cập kho tài liệu chuyên ngành vô cùng phong phú
Liên hệ: www.facebook.com/garmentspace
Giá 10k/ 5 lượt tải liên hệ page để mua https://www.facebook.com/garmentspace
Chỉ với 10k THẺ CÀO VIETTEL bạn có ngay 5 lượt download tài liệu bất kỳ do Garment Space upload, hoặc với 100k THẺ CÀO VIETTEL bạn được truy cập kho tài liệu chuyên ngành vô cùng phong phú
Liên hệ: www.facebook.com/garmentspace
SA is a global optimization technique.
It distinguishes between different local optima.
It is a memory less algorithm & the algorithm does not use any information gathered during the search.
SA is motivated by an analogy to annealing in solids.
& it is an iterative improvement algorithm.
you need to complete the r code and a singlepage document c.pdfadnankhan605720
you need to complete the r code and a single-page document containing two figures, report the
parameters you estimate and discuss how well your power law fits the network data, and explain
the finding.
Question: images
incomplete r code:
# IDS 564 - Spring 2023
# Lab 4 R Code - Estimating the Degree Exponent of a Scale-free Network
#=========================================================================
=====================
# 0. INITIATION
==========================================================================
=
#=========================================================================
=====================
## You'll need VGAM for the zeta function
# install.packages("VGAM") ## When prompted to install from binary version, select no
library(VGAM)
## You'll need this when calculating goodness of fit
# install.packages("parallel")
library(parallel)
library(ggplot2)
library(ggthemes)
library(dplyr)
library(tidyr)
##------------------------------------------------------------------------------
## This function will calculate the zeta function for you. You don't need to worry about it! Run it
and continue.
## gen_zeta(gamma , shift) will give you a number
gen_zeta <- function (gamma, shift = 1, deriv = 0)
{
deriv.arg <- deriv
rm(deriv)
if (!is.Numeric(deriv.arg, length.arg = 1, integer.valued = TRUE))
stop("'deriv' must be a single non-negative integer")
if (deriv.arg < 0 || deriv.arg > 2)
stop("'deriv' must be 0, 1, or 2")
if (deriv.arg > 0)
return(zeta.specials(Zeta.derivative(gamma, deriv.arg = deriv.arg,
shift = shift), gamma, deriv.arg, shift))
if (any(special <- Re(gamma) <= 1)) {
ans <- gamma
ans[special] <- Inf
special3 <- Re(gamma) < 1
ans[special3] <- NA
special4 <- (0 < Re(gamma)) & (Re(gamma) < 1) & (Im(gamma) == 0)
# ans[special4] <- Zeta.derivative(gamma[special4], deriv.arg = deriv.arg, shift = shift)
special2 <- Re(gamma) < 0
if (any(special2)) {
gamma2 <- gamma[special2]
cgamma <- 1 - gamma2
ans[special2] <- 2^(gamma2) * pi^(gamma2 - 1) * sin(pi *
gamma2/2) * gamma(cgamma) * Recall(cgamma)
}
if (any(!special)) {
ans[!special] <- Recall(gamma[!special])
}
return(zeta.specials(ans, gamma, deriv.arg, shift))
}
aa <- 12
ans <- 0
for (ii in 0:(aa - 1)) ans <- ans + 1/(shift + ii)^gamma
ans <- ans + Zeta.aux(shape = gamma, aa, shift = shift)
ans[shift <= 0] <- NaN
zeta.specials(ans, gamma, deriv.arg = deriv.arg, shift = shift)
}
## example:
gen_zeta(2.1, 4)
##------------------------------------------------------------------------------
## The P_k (the CDF)
P_k = function(gamma, k, k_sat){
### fill the function
return(1 - ( gen_zeta(gamma, k) / ... ))
}
##------------------------------------------------------------------------------
my_theme <- theme_classic() +
theme(legend.position = "bottom", legend.box = "horizontal", legend.direction = "horizontal",
title = element_text(size = 18), axis.title = element_text(size = 14),
axis.text.y = element_text(size = 16), axis.text.x = element_text(size = 16),
strip.text = element_text(size.
#Covnet model had been defined class ConvNetNew(torch.nn.Module).pdfcomputersmartdwarka
#Covnet model had been defined
class ConvNetNew(torch.nn.Module):
def __init__(self):
super(ConvNetNew, self).__init__()
#############################################################################
#
# TODO: Complete the network #Note: similar as Task 1
#############################################################################
#
# Block 1: 3 x 175 x 300 --> 32 x 87 x 150
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(32)
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
# Block 2: 32 x 87 x 150 --> 64 x 43 x 75
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(64)
self.relu2 = nn.ReLU()
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
# Block 3: 64 x 43 x 75 --> 128 x 21 x 37
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.relu3 = nn.ReLU()
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
# Block 4: 128 x 21 x 37 --> 256 x 10 x 18
self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1)
self.bn4 = nn.BatchNorm2d(256)
self.relu4 = nn.ReLU()
self.maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
#### [3%] ####
# AdaptiveAvgPool: 1 x 1
self.avgpool = nn.AdaptiveAvgPool1d(1)
# Linear layers: 256 x 1 x 1 --> 128
self.fc1 = nn.Linear(256 * 1 * 1, 128)
# Dropout
self.dropout = nn.Dropout(0.5)
#############################################################################
#
# END OF YOUR CODE #
#############################################################################
#
def forward(self, x):
#############################################################################
#
# TODO: implement the fordward #Note: similar as Task 1
#############################################################################
#
# Block 1: 3 x 175 x 300 --> 32 x 87 x 150
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.maxpool1(x)
# Block 2: 32 x 87 x 150 --> 64 x 43 x 75
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.maxpool2(x)
# Block 3: 64 x 43 x 75 --> 128 x 21 x 37
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
x = self.maxpool3(x)
# Block 4: 128 x 21 x 37 --> 256 x 10 x 18
x = self.conv4(x)
x = self.bn4(x)
x = self.relu4(x)
x = self.maxpool4(x)
#### [3%] ####
# AdaptiveAvgPool:
self.avgpool = nn.AdaptiveAvgPool1d(1)
# Flatten the output for the linear layers
x = x.view(x.size(0), -1)
# Linear layers: 256 x 1 x 1 --> 256
x = self.fc1(x)
x = self.relu1(x)
x = self.dropout(x)
#############################################################################
#
# END OF YOUR CODE #
#############################################################################
#
return x
#With the defined convolution layers (ConvNetNew()), the whole contrastive learning
framework could be constructed. The encoder_q and encoder_k have the same convolutional
layers. However, the encoder_k will not be optimized by the.
Emerging Languages: A Tour of the HorizonAlex Payne
A tour of a number of new programming languages, organized by the job they're best suited for. Presented at Philadelphia Emerging Technology for the Enterprise 2012.
SA is a global optimization technique.
It distinguishes between different local optima.
It is a memory less algorithm & the algorithm does not use any information gathered during the search.
SA is motivated by an analogy to annealing in solids.
& it is an iterative improvement algorithm.
you need to complete the r code and a singlepage document c.pdfadnankhan605720
you need to complete the r code and a single-page document containing two figures, report the
parameters you estimate and discuss how well your power law fits the network data, and explain
the finding.
Question: images
incomplete r code:
# IDS 564 - Spring 2023
# Lab 4 R Code - Estimating the Degree Exponent of a Scale-free Network
#=========================================================================
=====================
# 0. INITIATION
==========================================================================
=
#=========================================================================
=====================
## You'll need VGAM for the zeta function
# install.packages("VGAM") ## When prompted to install from binary version, select no
library(VGAM)
## You'll need this when calculating goodness of fit
# install.packages("parallel")
library(parallel)
library(ggplot2)
library(ggthemes)
library(dplyr)
library(tidyr)
##------------------------------------------------------------------------------
## This function will calculate the zeta function for you. You don't need to worry about it! Run it
and continue.
## gen_zeta(gamma , shift) will give you a number
gen_zeta <- function (gamma, shift = 1, deriv = 0)
{
deriv.arg <- deriv
rm(deriv)
if (!is.Numeric(deriv.arg, length.arg = 1, integer.valued = TRUE))
stop("'deriv' must be a single non-negative integer")
if (deriv.arg < 0 || deriv.arg > 2)
stop("'deriv' must be 0, 1, or 2")
if (deriv.arg > 0)
return(zeta.specials(Zeta.derivative(gamma, deriv.arg = deriv.arg,
shift = shift), gamma, deriv.arg, shift))
if (any(special <- Re(gamma) <= 1)) {
ans <- gamma
ans[special] <- Inf
special3 <- Re(gamma) < 1
ans[special3] <- NA
special4 <- (0 < Re(gamma)) & (Re(gamma) < 1) & (Im(gamma) == 0)
# ans[special4] <- Zeta.derivative(gamma[special4], deriv.arg = deriv.arg, shift = shift)
special2 <- Re(gamma) < 0
if (any(special2)) {
gamma2 <- gamma[special2]
cgamma <- 1 - gamma2
ans[special2] <- 2^(gamma2) * pi^(gamma2 - 1) * sin(pi *
gamma2/2) * gamma(cgamma) * Recall(cgamma)
}
if (any(!special)) {
ans[!special] <- Recall(gamma[!special])
}
return(zeta.specials(ans, gamma, deriv.arg, shift))
}
aa <- 12
ans <- 0
for (ii in 0:(aa - 1)) ans <- ans + 1/(shift + ii)^gamma
ans <- ans + Zeta.aux(shape = gamma, aa, shift = shift)
ans[shift <= 0] <- NaN
zeta.specials(ans, gamma, deriv.arg = deriv.arg, shift = shift)
}
## example:
gen_zeta(2.1, 4)
##------------------------------------------------------------------------------
## The P_k (the CDF)
P_k = function(gamma, k, k_sat){
### fill the function
return(1 - ( gen_zeta(gamma, k) / ... ))
}
##------------------------------------------------------------------------------
my_theme <- theme_classic() +
theme(legend.position = "bottom", legend.box = "horizontal", legend.direction = "horizontal",
title = element_text(size = 18), axis.title = element_text(size = 14),
axis.text.y = element_text(size = 16), axis.text.x = element_text(size = 16),
strip.text = element_text(size.
#Covnet model had been defined class ConvNetNew(torch.nn.Module).pdfcomputersmartdwarka
#Covnet model had been defined
class ConvNetNew(torch.nn.Module):
def __init__(self):
super(ConvNetNew, self).__init__()
#############################################################################
#
# TODO: Complete the network #Note: similar as Task 1
#############################################################################
#
# Block 1: 3 x 175 x 300 --> 32 x 87 x 150
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(32)
self.relu1 = nn.ReLU()
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
# Block 2: 32 x 87 x 150 --> 64 x 43 x 75
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(64)
self.relu2 = nn.ReLU()
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
# Block 3: 64 x 43 x 75 --> 128 x 21 x 37
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.relu3 = nn.ReLU()
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
# Block 4: 128 x 21 x 37 --> 256 x 10 x 18
self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1)
self.bn4 = nn.BatchNorm2d(256)
self.relu4 = nn.ReLU()
self.maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
#### [3%] ####
# AdaptiveAvgPool: 1 x 1
self.avgpool = nn.AdaptiveAvgPool1d(1)
# Linear layers: 256 x 1 x 1 --> 128
self.fc1 = nn.Linear(256 * 1 * 1, 128)
# Dropout
self.dropout = nn.Dropout(0.5)
#############################################################################
#
# END OF YOUR CODE #
#############################################################################
#
def forward(self, x):
#############################################################################
#
# TODO: implement the fordward #Note: similar as Task 1
#############################################################################
#
# Block 1: 3 x 175 x 300 --> 32 x 87 x 150
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.maxpool1(x)
# Block 2: 32 x 87 x 150 --> 64 x 43 x 75
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.maxpool2(x)
# Block 3: 64 x 43 x 75 --> 128 x 21 x 37
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
x = self.maxpool3(x)
# Block 4: 128 x 21 x 37 --> 256 x 10 x 18
x = self.conv4(x)
x = self.bn4(x)
x = self.relu4(x)
x = self.maxpool4(x)
#### [3%] ####
# AdaptiveAvgPool:
self.avgpool = nn.AdaptiveAvgPool1d(1)
# Flatten the output for the linear layers
x = x.view(x.size(0), -1)
# Linear layers: 256 x 1 x 1 --> 256
x = self.fc1(x)
x = self.relu1(x)
x = self.dropout(x)
#############################################################################
#
# END OF YOUR CODE #
#############################################################################
#
return x
#With the defined convolution layers (ConvNetNew()), the whole contrastive learning
framework could be constructed. The encoder_q and encoder_k have the same convolutional
layers. However, the encoder_k will not be optimized by the.
Emerging Languages: A Tour of the HorizonAlex Payne
A tour of a number of new programming languages, organized by the job they're best suited for. Presented at Philadelphia Emerging Technology for the Enterprise 2012.
A tour of Python: slides from presentation given in 2012.
[Some slides are not properly rendered in SlideShare: the original is still available at http://www.aleksa.org/2015/04/python-presentation_7.html.]
Executive Directors Chat Leveraging AI for Diversity, Equity, and InclusionTechSoup
Let’s explore the intersection of technology and equity in the final session of our DEI series. Discover how AI tools, like ChatGPT, can be used to support and enhance your nonprofit's DEI initiatives. Participants will gain insights into practical AI applications and get tips for leveraging technology to advance their DEI goals.
A workshop hosted by the South African Journal of Science aimed at postgraduate students and early career researchers with little or no experience in writing and publishing journal articles.
June 3, 2024 Anti-Semitism Letter Sent to MIT President Kornbluth and MIT Cor...Levi Shapiro
Letter from the Congress of the United States regarding Anti-Semitism sent June 3rd to MIT President Sally Kornbluth, MIT Corp Chair, Mark Gorenberg
Dear Dr. Kornbluth and Mr. Gorenberg,
The US House of Representatives is deeply concerned by ongoing and pervasive acts of antisemitic
harassment and intimidation at the Massachusetts Institute of Technology (MIT). Failing to act decisively to ensure a safe learning environment for all students would be a grave dereliction of your responsibilities as President of MIT and Chair of the MIT Corporation.
This Congress will not stand idly by and allow an environment hostile to Jewish students to persist. The House believes that your institution is in violation of Title VI of the Civil Rights Act, and the inability or
unwillingness to rectify this violation through action requires accountability.
Postsecondary education is a unique opportunity for students to learn and have their ideas and beliefs challenged. However, universities receiving hundreds of millions of federal funds annually have denied
students that opportunity and have been hijacked to become venues for the promotion of terrorism, antisemitic harassment and intimidation, unlawful encampments, and in some cases, assaults and riots.
The House of Representatives will not countenance the use of federal funds to indoctrinate students into hateful, antisemitic, anti-American supporters of terrorism. Investigations into campus antisemitism by the Committee on Education and the Workforce and the Committee on Ways and Means have been expanded into a Congress-wide probe across all relevant jurisdictions to address this national crisis. The undersigned Committees will conduct oversight into the use of federal funds at MIT and its learning environment under authorities granted to each Committee.
• The Committee on Education and the Workforce has been investigating your institution since December 7, 2023. The Committee has broad jurisdiction over postsecondary education, including its compliance with Title VI of the Civil Rights Act, campus safety concerns over disruptions to the learning environment, and the awarding of federal student aid under the Higher Education Act.
• The Committee on Oversight and Accountability is investigating the sources of funding and other support flowing to groups espousing pro-Hamas propaganda and engaged in antisemitic harassment and intimidation of students. The Committee on Oversight and Accountability is the principal oversight committee of the US House of Representatives and has broad authority to investigate “any matter” at “any time” under House Rule X.
• The Committee on Ways and Means has been investigating several universities since November 15, 2023, when the Committee held a hearing entitled From Ivory Towers to Dark Corners: Investigating the Nexus Between Antisemitism, Tax-Exempt Universities, and Terror Financing. The Committee followed the hearing with letters to those institutions on January 10, 202
Model Attribute Check Company Auto PropertyCeline George
In Odoo, the multi-company feature allows you to manage multiple companies within a single Odoo database instance. Each company can have its own configurations while still sharing common resources such as products, customers, and suppliers.
Biological screening of herbal drugs: Introduction and Need for
Phyto-Pharmacological Screening, New Strategies for evaluating
Natural Products, In vitro evaluation techniques for Antioxidants, Antimicrobial and Anticancer drugs. In vivo evaluation techniques
for Anti-inflammatory, Antiulcer, Anticancer, Wound healing, Antidiabetic, Hepatoprotective, Cardio protective, Diuretics and
Antifertility, Toxicity studies as per OECD guidelines
Synthetic Fiber Construction in lab .pptxPavel ( NSTU)
Synthetic fiber production is a fascinating and complex field that blends chemistry, engineering, and environmental science. By understanding these aspects, students can gain a comprehensive view of synthetic fiber production, its impact on society and the environment, and the potential for future innovations. Synthetic fibers play a crucial role in modern society, impacting various aspects of daily life, industry, and the environment. ynthetic fibers are integral to modern life, offering a range of benefits from cost-effectiveness and versatility to innovative applications and performance characteristics. While they pose environmental challenges, ongoing research and development aim to create more sustainable and eco-friendly alternatives. Understanding the importance of synthetic fibers helps in appreciating their role in the economy, industry, and daily life, while also emphasizing the need for sustainable practices and innovation.
Read| The latest issue of The Challenger is here! We are thrilled to announce that our school paper has qualified for the NATIONAL SCHOOLS PRESS CONFERENCE (NSPC) 2024. Thank you for your unwavering support and trust. Dive into the stories that made us stand out!
1. Chapter 1
Optimization using
optim() in R
An in-class activity to apply Nelder-Mead and Simulated Annealing in
optim() for a variety of bivariate functions.
# SC1 4/18/2013
# Everyone optim()!
# The goal of this exercise is to minimize a function using R's optim().
# Steps:
# 0. Break into teams of size 1 or 2 students.
# 1. Each team will choose a unique function from this list:
# Test functions for optimization
# http://en.wikipedia.org/wiki/Test_functions_for_optimization
# 1a. Claim the function by typing your names into the function section below.
# 1b. Click on "edit" on Wikipedia page to copy latex math for function
# and paste between dollar signs $f(x)$
# 2. Following my "Sphere function" example:
# 2a. Define function()
# 2b. Plot the function
# 2c. Optimize (minimize) the function
# 2d. Comment on convergence
# 3. Paste your work into your function section.
# 4. I'll post this file on the website for us all to enjoy, as well as create
# a lovely pdf with images of the functions.
2. 2 Optimization using optim() in R
1.1 Sphere function
f(x) = n
i=1 x2
i
########################################
# Sphere function
# Erik Erhardt
# $f(boldsymbol{x}) = sum_{i=1}^{n} x_{i}^{2}$
# name used in plot below
f.name <- "Sphere function"
# define the function
f.sphere <- function(x) {
# make x a matrix so this function works for plotting and for optimizing
x <- matrix(x, ncol=2)
# calculate the function value for each row of x
f.x <- apply(x^2, 1, sum)
# return function value
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x1 <- seq(-10, 10, length = 101)
x2 <- seq(-10, 10, length = 101)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
# evaluate function
y <- f.sphere(X)
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
# plot the function
library(lattice) # use the lattice package
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
)
# optimize (minimize) the function using Nelder-Mead
3. 1.1 Sphere function 3
out.sphere <- optim(c(1,1), f.sphere, method = "Nelder-Mead")
out.sphere
## $par
## [1] 3.754e-05 5.179e-05
##
## $value
## [1] 4.092e-09
##
## $counts
## function gradient
## 63 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.sphere <- optim(c(1,1), f.sphere, method = "SANN")
out.sphere
## $par
## [1] 0.0001933 -0.0046280
##
## $value
## [1] 2.146e-05
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
###
# comments based on plot and out.*
4. 4 Optimization using optim() in R
# The unique minimum was found within tolerance.
## values of x1 and x2 at the minimum
# $par
# [1] 3.754010e-05 5.179101e-05
#
## value of the function at the minimum
# $value
# [1] 4.091568e-09
#
## convergence in 63 iterations
# $counts
# function gradient
# 63 NA
#
## 0 = convergence successful
# $convergence
# [1] 0
#
## no news is good news
# $message
# NULL
Sphere function
−10
−5
0
5
10 −10
−5
0
5
10
0
50
100
150
200
x1
x2
y
5. 1.2 Sphere function with stochastic noise 5
1.2 Sphere function with stochastic noise
########################################
# Sphere function with stochastic noise
# Christian Gunning
# name used in plot below
f.name <- "Sphere function with stochastic noise at each iteration"
# define the function
f.sphere1 <- function(x) {
# make x a matrix so this function works for plotting and for optimizing
x <- matrix(x, ncol=2)
# calculate the function value for each row of x
# f.x <- apply(x, 1, function(y) {ret<- sum(y^2) })
f.x <- apply(x, 1, function(y) {ret<- sum(y^2)+rnorm(1,mean=1,sd=abs(mean(y))^(1/10))}
# return function value
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x1 <- seq(-10, 10, length = 101)
x2 <- seq(-10, 10, length = 101)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
# evaluate function
y <- f.sphere1(X)
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
# plot the function
library(lattice) # use the lattice package
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
)
# optimize (minimize) the function using Nelder-Mead
out.NM <- optim(c(1,1), f.sphere1, method = "Nelder-Mead")
out.NM
6. 6 Optimization using optim() in R
## $par
## [1] 0.875 1.150
##
## $value
## [1] 0.2255
##
## $counts
## function gradient
## 321 NA
##
## $convergence
## [1] 10
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.sann <- optim(c(1,1), f.sphere1, method = "SANN")
out.sann
## $par
## [1] -0.7529 -0.3134
##
## $value
## [1] -1.036
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
7. 1.2 Sphere function with stochastic noise 7
Sphere function with stochastic noise at each iteration
−10
−5
0
5
10 −10
−5
0
5
10
0
50
100
150
200
x1
x2
y
8. 8 Optimization using optim() in R
1.3 Rosenbrock function
########################################
# Rosenbrock function
# Mary Rose Paiz
# name used in plot below
f.name <- "Rosenbrock Function"
# define the function
f.rosenbrock <- function(x) {
x1 <- x[,1]
x2 <- x[,2]
# calculating f.x
term1 <- (x2 - (x1)^2)^2
term2 <- (x1 - 1)^2
f.x <- (100*term1 + term2)
# return function value
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x1 <- seq(-1.5, 2.0, length = 101)
x2 <- seq(.5, 3.0, length = 101)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
# evaluate function
y <- f.rosenbrock(X)
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
# plot the function
library(lattice) # use the lattice package
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -75, x = -50) # view position
)
9. 1.3 Rosenbrock function 9
# optimize (minimize) the function using Nelder-Mead
out.rosenbrock <- optim(c(1,1), f.rosenbrock, method = "Nelder-Mead")
## Error: incorrect number of dimensions
out.rosenbrock
## Error: object ’out.rosenbrock’ not found
# optimize (minimize) the function using Simulated Annealing
out.rosenbrock <- optim(c(1,1), f.rosenbrock, method = "SANN")
## Error: incorrect number of dimensions
out.rosenbrock
## Error: object ’out.rosenbrock’ not found
Rosenbrock Function
−1.5
−1.0
−0.5
0.0
0.5
1.0
1.5
2.0
0.5
1.0
1.5
2.0
2.5
3.0
200
400
600
800
1000
1200
x1
x2
y
10. 10 Optimization using optim() in R
1.4 Beale’s function
f(x, y) = (1.5 − x + xy)2
+ 2.25 − x + xy2 2
+ 2.625 − x + xy3 2
.
########################################
# Beale's function
# Alvaro
#$$f(x,y) = left( 1.5 - x + xy right)^{2} + left( 2.25 - x + xy^{2}right)^{2} + left(2.62
#Minimum:
#$$f(3, 0.5) = 0
#-4.5 le x,y le 4.5$$
# name used in plot below
f.name <- "Beale's function"
# define the function
f.beale <- function(mx) {
mx <- matrix(mx, ncol=2)
x<- mx[,1]
y<- mx[,2]
f.x<- (1.5 - x +x*y)^2 + (2.25-x+(x*y)^2)^2 + (2.625-x+(x*y)^3)^2
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x1 <- seq(-4.5, 4.5, length = 101)
x2 <- seq(-4.5, 4.5, length = 101)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
# evaluate function
y <- log10(f.beale(X))
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
# plot the function
library(lattice) # use the lattice package
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
11. 1.4 Beale’s function 11
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = 0, x = 0) # view position
)
# optimize (minimize) the function using Nelder-Mead
out.beale <- optim(c(1,1), f.beale, method = "Nelder-Mead")
out.beale
## $par
## [1] 2.4814 0.2284
##
## $value
## [1] 0.286
##
## $counts
## function gradient
## 83 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.beale <- optim(c(1,1), f.beale, method = "SANN")
out.beale
## $par
## [1] 2.4830 0.2269
##
## $value
## [1] 0.2861
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
12. 12 Optimization using optim() in R
###
# comments based on plot and out.*
# The unique minimum was found within tolerance.
Beale's function
−4 −2 0 2 4
−4
−2
0
2
4
0
2
4
6
x1
x2
y
13. 1.5 Goldstein-Price function 13
1.5 Goldstein-Price function
f(x, y) = 1 + (x + y + 1)2
19 − 14x + 3x2
− 14y + 6xy + 3y2
30 + (2x −
########################################
# Goldstein-Price function
# Barnaly Rashid
#GoldsteinPrice function:
#$f(x,y) = left(1+left(x+y+1right)^{2}left(19-14x+3x^{2}-14y+6xy+3y^{2}right)righ
f.name <- "Goldstein-Price function"
# define the function
f.goldprice <- function(x1x2) {
# calculate the function value for x1 and x2
x1x2 <- matrix(x1x2,ncol=2)
a <- 1+(x1x2[,1]+x1x2[,2]+1)^2*(19-14*x1x2[,1]+3*x1x2[,1]^2-14*x1x2[,2]+6*x1x2[,1]*x1x
b <- 30 + (2*x1x2[,1]-3*x1x2[,2])^2*(18-32*x1x2[,1]+12*x1x2[,1]^2+48*x1x2[,2]-36*x1x2[
f.x <- a*b
# return function value
return(f.x)
}
# matrix(x1x2,ncol=2)
#plot the function
# define ranges of x to plot over
x1 <- seq(-1.5, 1.5, length = 101)
x2 <- seq(-1.5, 1.5, length = 101)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
#y <- f.goldprice(X[,1],X[,2])
y <- f.goldprice(X)
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
library(lattice) # use the lattice package
14. 14 Optimization using optim() in R
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
)
# optimize (minimize) the function using Nelder-Mead
out.gold <- optim(c(0,-1), f.goldprice, method = "Nelder-Mead")
out.gold
## $par
## [1] 0 -1
##
## $value
## [1] 3
##
## $counts
## function gradient
## 57 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.gold <- optim(c(0,-1), f.goldprice, method = "SANN")
out.gold
## $par
## [1] 0 -1
##
## $value
## [1] 3
##
## $counts
## function gradient
## 10000 NA
##
16. 16 Optimization using optim() in R
1.6 Booth’s function
f(x) = (x + 2y − 7)2
+ (2x + y − 5)2
########################################
# Booth's function
# Olga Vitkovskaya
# $f(boldsymbol{x}) = (x + 2y -7)^{2}+(2x + y -5)^{2}$
# name used in plot below
f.name <- "Booth's function"
# define the function
f.booths <- function(xy) {
# make x a matrix so this function works for plotting and for optimizing
xy <- matrix(xy, ncol=2)
# calculate the function value for each row of x
f.row <- function(this.row) {
(this.row[1] + 2 * this.row[2] -7)^2 + (2 * this.row[1] + this.row[2] -5)^2
}
f.x <- apply(xy, 1, f.row)
# return function value
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x.plot <- seq(-10, 10, length = 101)
y.plot <- seq(-10, 10, length = 101)
grid.plot <- as.matrix(expand.grid(x.plot, y.plot))
colnames(grid.plot) <- c("x", "y")
# evaluate function
z.plot <- f.booths(grid.plot)
# put X, y and z values in a data.frame for plotting
df <- data.frame(grid.plot, z.plot)
# plot the function
library(lattice) # use the lattice package
17. 1.6 Booth’s function 17
p <- wireframe(z.plot ~ x * y # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
)
plot(p)
# optimize (minimize) the function using Nelder-Mead
out.booth1 <- optim(c(1,1), f.booths, method = "Nelder-Mead")
out.booth1
## $par
## [1] 0.9999 3.0001
##
## $value
## [1] 4.239e-08
##
## $counts
## function gradient
## 69 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.booth2 <- optim(c(1,1), f.booths, method = "SANN")
out.booth2
## $par
## [1] 1.004 3.003
##
## $value
## [1] 0.0002215
##
## $counts
## function gradient
## 10000 NA
##
18. 18 Optimization using optim() in R
## $convergence
## [1] 0
##
## $message
## NULL
Booth's function
−10
−5
0
5
10 −10
−5
0
5
10
0
500
1000
1500
2000
2500
x
y
z.plot
19. 1.7 Booth’s function 19
1.7 Booth’s function
f(x, y) = (x + 2y − 7)2
+ (2x + y − 5)2
.
########################################
# Booth's function
# {Katherine Freeland)
# Booth's Function: $f(x,y) = left( x + 2y -7right)^{2} + left(2x +y - 5right)^{2}.q
# Minimum: $f(1,3) = 0</math>, for <math>-10 le x,y le 10</math>.$
f.booth <- function(xy){
xy <- matrix(xy, ncol=2)
f.x <- ((xy[,1] + (2*xy[,2]) - 7)^2) + ((2*xy[,1]+ xy[,2]-5)^2)
return(f.x)
}
x <- seq(-5, 5, length=101)
y <- seq(-5, 5, length=101)
mat <- as.matrix(expand.grid(x, y))
colnames(mat) <- c("x", "y")
f.x <- f.booth(mat)
df <- data.frame(mat, f.x)
library(lattice) # use the lattice package
wireframe(f.x ~ x * y # f.x, x, and y axes to plot
, data = df # data.frame with values to plot
, main = "Booth Function" # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
)
# optimize (minimize) the function using Nelder-Mead
out.booth <- optim(c(1,1), f.booth, method = "Nelder-Mead")
out.booth
## $par
## [1] 0.9999 3.0001
##
## $value
## [1] 4.239e-08
20. 20 Optimization using optim() in R
##
## $counts
## function gradient
## 69 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
out.booth2 <- optim(c(1,1), f.booth, method = "SANN")
out.booth2
## $par
## [1] 1.000 3.002
##
## $value
## [1] 1.658e-05
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
21. 1.7 Booth’s function 21
Booth Function
−4
−2
0
2
4 −4
−2
0
2
4
0
200
400
600
800
x
y
f.x
22. 22 Optimization using optim() in R
1.8 Bukin function N. 6
f(x, y) = 100 |y − 0.01x2| + 0.01 |x + 10|
########################################
# Bukin function N. 6
# {Zhanna G.}
# $f(x,y) = 100sqrt{left|y - 0.01x^{2}right|} + 0.01 left|x+10 right|$
f.name <- "Bukin_6 function"
# define the function
f.bukin <- function(xy) {
xy <- matrix(xy, ncol=2)
# calculate the function value for each row of x
f.xy <- 100*sqrt(abs(y-0.01*(x)^2)) + 0.01*abs(x+10)
# return function value
return(f.xy)
}
x <- seq(-15, -5, length = 101)
y <- seq(-3, 3, length = 101)
X <- as.matrix(expand.grid(x, y))
#X
colnames(X) <- c("x", "y")
Z <- f.bukin(X)
#Z
df <- data.frame(X, Z)
#head(df)
# plot the function
library(lattice) # use the lattice package
wireframe(Z ~ x * y # y, x, and z axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
)
23. 1.8 Bukin function N. 6 23
Bukin_6 function
−14
−12
−10
−8
−6
−3
−2
−1
0
1
2
3
50
100
150
200
x
y
Z
24. 24 Optimization using optim() in R
1.9 Ackley’s function
f(x, y) = −20 exp −0.2 0.5 (x2 + y2) −exp (0.5 (cos (2πx) + cos (2πy)))+
20 + e.
########################################
# Ackley's function
# Rob Hoy
# $<math>f(x,y) = -20expleft(-0.2sqrt{0.5left(x^{2}+y^{2}right)}right)-expleft(0.5left
# name used in plot below
f.name <- "Ackley's function"
# define the function
f.ackley <- function(X) {
m <- matrix(X, ncol=2)
# calculate the function value
t1 <- (-20*(exp(-.2*sqrt(.5*(m[,1]^2+m[,2]^2)))))
t2 <- (exp(.5*(cos(2*pi*m[,1]) + cos(2 * pi * m[,2]))))
z <- t1 - t2 + 20 + exp(1)
# return function value
return(z)
}
# define ranges of x and y to plot
x <- seq(-10, 10, length = 101)
y <- seq(-10, 10, length = 101)
# make x and y a matrix, plotting and opt.
X <- as.matrix(expand.grid(x, y))
colnames(X) <- c("x", "y")
# evaluate function
z <- f.ackley(X)
# Create dataframe for graphing
df.ack <-data.frame(X,z)
# plot the function
library(lattice) # use the lattice package
wireframe(z ~ x * y # z, x, and y axes to plot
, data = df.ack # data.frame with values to plot
, main = f.name # name the plot
25. 1.9 Ackley’s function 25
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
)
# optimize (minimize) the function using Nelder-Mead
out.ackley1 <- optim(c(-1,1), f.ackley, method = "Nelder-Mead")
out.ackley1
## $par
## [1] -0.9685 0.9685
##
## $value
## [1] 3.574
##
## $counts
## function gradient
## 45 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.ackley2 <- optim(c(1,1), f.ackley, method = "SANN")
out.ackley2
## $par
## [1] 0.001159 0.003890
##
## $value
## [1] 0.01192
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
26. 26 Optimization using optim() in R
#The first one was faster, but it appears to me that the second one is actually the more accur
Ackley's function
−10
−5
0
5
10 −10
−5
0
5
10
5
10
15
x
y
z
27. 1.10 Matyas function 27
1.10 Matyas function
f(x, y) = 0.26 x2
+ y2
− 0.48xy.
########################################
# Matyas function
# Josh Nightingale
# $f(x,y) = 0.26 left( x^{2} + y^{2}right) - 0.48 xy.$
# name used in plot below
f.name <- "Matyas function"
# define the function
f.matyas <- function(XY) {
# make x a matrix so this function works for plotting and for optimizing
XY <- matrix(XY, ncol=2)
x <- XY[,1]
y <- XY[,2]
# calculate the function value for each row of x
f.xy <- (0.26 * (x^2 + y^2)) - (0.48 * x * y)
return(f.xy)
}
# plot the function
# define ranges of x to plot over and put into matrix
x <- seq(-10, 10, length = 101)
y <- seq(-10, 10, length = 101)
XY <- as.matrix(expand.grid(x, y))
colnames(XY) <- c("x", "y")
# evaluate function
z <- f.matyas(XY)
# put X and y values in a data.frame for plotting
df <- data.frame(XY, z)
# plot the function
library(lattice) # use the lattice package
wireframe(z ~ x * y # z, x, and y axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
#, screen = list(z = 3, x = 5) # view position
)
28. 28 Optimization using optim() in R
# optimize (minimize) the function using Nelder-Mead
out.matyas <- optim(c(1,1), f.matyas, method = "Nelder-Mead")
out.matyas
## $par
## [1] 8.526e-05 7.856e-05
##
## $value
## [1] 2.796e-10
##
## $counts
## function gradient
## 69 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.matyas <- optim(c(1,1), f.matyas, method = "SANN")
out.matyas
## $par
## [1] 0.02710 0.01713
##
## $value
## [1] 4.442e-05
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
29. 1.10 Matyas function 29
Matyas function
−10
−5
0
5
10
−10
−5
0
5
10
0
20
40
60
80
100
x
y
z
30. 30 Optimization using optim() in R
1.11 Levi function N. 13
f(x, y) = sin2
(3πx)+(x − 1)2
1 + sin2
(3πy) +(y − 1)2
1 + sin2
(2πy) .
########################################
# Levi function N. 13
# Claire L
# $f(x,y) = sin^{2}left(3pi xright)+left(x-1right)^{2}left(1+sin^{2}left(3pi yright)
# name used in plot below
f.name <- "Levi function"
# define the function
f.levi <- function(X) {
# make x a matrix so this function works for plotting and for optimizing
# x <- matrix(x, ncol=1)
# y <- matrix(y, ncol=1)
X <- matrix(X, ncol=2)
# calculate the function value for each row of x
f.xy <- (sin(3*pi*X[,1]))^2 + ((X[,1]-1)^2)*(1+(sin(3*pi*X[,2]))^2) + ((X[,2]-1)^2)*(1+(sin(
# return function value
return(f.xy)
}
# plot the function
# define ranges of x to plot over and put into matrix
x <- seq(-5, 5, length = 101)
y <- seq(-5, 5, length = 101)
X <- as.matrix(expand.grid(x, y))
colnames(X) <- c("x", "y")
# evaluate function
z <- f.levi(X)
# put X and y and z values in a data.frame for plotting
df <- data.frame(X,z)
# plot the function
#It works! :)
library(lattice) # use the lattice package
wireframe(z ~ x * y
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
31. 1.11 Levi function N. 13 31
, screen = list(z = -50, x = -70) # view position
)
# optimize (minimize) the function using Nelder-Mead
out.levi <- optim(c(1,1), f.levi, method = "Nelder-Mead", )
out.levi
## $par
## [1] 1 1
##
## $value
## [1] 1.35e-31
##
## $counts
## function gradient
## 103 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.levi <- optim(c(1,1), f.levi, method = "SANN")
out.levi
## $par
## [1] 1 1
##
## $value
## [1] 1.35e-31
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
32. 32 Optimization using optim() in R
#optimize with lower and upper bounds.
out.levi <- optim(c(1,1), f.levi, method = "L-BFGS-B", lower=-1, upper=1)
out.levi
## $par
## [1] 1 1
##
## $value
## [1] 1.35e-31
##
## $counts
## function gradient
## 1 1
##
## $convergence
## [1] 0
##
## $message
## [1] "CONVERGENCE: NORM OF PROJECTED GRADIENT <= PGTOL"
Levi function
−4
−2
0
2
4 −4
−2
0
2
4
20
40
60
80
100
120
x
y
z
33. 1.12 Three-hump camel function 33
1.12 Three-hump camel function
f(x, y) = 2x2
− 1.05x4
+ x6
6 + xy + y2
########################################
# Three-hump camel function
# Mohammad
# Optimization
#$f(x,y) = 2x^{2} - 1.05x^{4} + frac{x^{6}}{6} + xy + y^{2}$
#$-5le x,y le 5$
# name used in plot below
f.name <- "Three-hump camel function"
# define the function
f.camel <- function(input) {
# make x a matrix so this function works for plotting and for optimizing
input <- matrix(input, ncol=2)
# calculate the function value for each row of x
f.x <- (2*input[,1]^2) - (1.05*input[,1]^4) + (input[,1]^6)/6 +
input[,1]*input[,2] + input[,2]^2;
# f.x <- apply(x^2, 1, sum)
# return function value
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x <- seq(-5, 5, length = 101)
y <- seq(-5, 5, length = 101)
X <- as.matrix(expand.grid(x, y))
colnames(X) <- c("x", "y")
# evaluate function
z <- f.camel(X)
# put X and y values in a data.frame for plotting
df <- data.frame(X, z)
# plot the function
library(lattice) # use the lattice package
wireframe(z ~ x * y # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
34. 34 Optimization using optim() in R
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -30, x = -70) # view position
)
# optimize (minimize) the function using Nelder-Mead
out.camel <- optim(runif(2,-5,5), f.camel, method = "L-BFGS-B", lower=c(-5,-5),
upper=c(5,5))
out.camel
## $par
## [1] 6.440e-08 -1.416e-08
##
## $value
## [1] 7.583e-15
##
## $counts
## function gradient
## 12 12
##
## $convergence
## [1] 0
##
## $message
## [1] "CONVERGENCE: REL_REDUCTION_OF_F <= FACTR*EPSMCH"
Three−hump camel function
−4
−2
0
2
4
−4
−2
0
2
4
0
500
1000
1500
2000
x
y
z
35. 1.13 Easom function 35
1.13 Easom function
f(x, y) = − cos(x) cos(y) exp(−((x − π)2
+ (y − π)2
))
########################################
# Easom function
# Maozhen Gong
#f(x,y)=-cos(x)cos(y)exp(-((x-pi)^2+(y-pi)^2))
f.name<-"Easom function"
#define the function
f.easom<-function(x){
# make x a matrix so this function works for plotting and for optimizing
x <- matrix(x, ncol=2)
# calculate the function value for each row of x
f.x<-apply(x,1,function(x) {-prod(cos(x)/exp((x-pi)^2))})
# return function value
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x1 <- seq(-10, 10, length = 101)
x2 <- seq(-10, 10, length = 101)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
# evaluate function
y <- f.easom(X)
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
# plot the function
library(lattice) # use the lattice package
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
36. 36 Optimization using optim() in R
)
# optimize (minimize) the function using Nelder-Mead
out.sphere <- optim(c(3,3), f.easom, method = "Nelder-Mead")
out.sphere
## $par
## [1] 3.142 3.142
##
## $value
## [1] -1
##
## $counts
## function gradient
## 51 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.sphere <- optim(c(3,3), f.easom, method = "SANN")
out.sphere
## $par
## [1] 3 3
##
## $value
## [1] -0.9416
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
37. 1.13 Easom function 37
Easom function
−10
−5
0
5
10 −10
−5
0
5
10
−0.8
−0.6
−0.4
−0.2
0.0
x1
x2
y
38. 38 Optimization using optim() in R
1.14 Cross-in-tray function
########################################
# Cross-in-tray function
39. 1.15 Eggholder function 39
1.15 Eggholder function
f(x, y) = − (y + 47) sin y + x
2 + 47 − x sin |x − (y + 47)|
########################################
# Eggholder function
# Rogers F Silva
# $f(x,y) = - left(y+47right) sin left(sqrt{left|y + frac{x}{2}+47right|}right)
# Minimum: $f(512, 404.2319) = -959.6407$, for $-512le x,y le 512$.
# $f(boldsymbol{x}) = sum_{i=1}^{n} x_{i}^{2}$
# name used in plot below
f.name <- "Eggholder function"
# define the function
f.egg <- function(x) {
# make x a matrix so this function works for plotting and for optimizing
x <- matrix(x, ncol=2)
# calculate the function value for each row of x
x1 = x[,1];
x2 = x[,2];
f.x <- -(x2+47)*sin(sqrt(abs(x2+x1/2+47))) - x1*sin(sqrt(abs(x1-(x2+47))))
# return function value
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x1 <- seq(-512, 512, length = 129)
x2 <- seq(-512, 512, length = 129)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
# evaluate function
y <- f.egg(X)
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
# plot the function
library(lattice) # use the lattice package
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
41. 1.15 Eggholder function 41
## EXTENSION 75 -956.911104 -956.917960
## HI-REDUCTION 77 -956.915023 -956.917960
## LO-REDUCTION 79 -956.916157 -956.917960
## HI-REDUCTION 81 -956.917804 -956.917960
## HI-REDUCTION 83 -956.917950 -956.918158
## HI-REDUCTION 85 -956.917960 -956.918187
## HI-REDUCTION 87 -956.918158 -956.918205
## HI-REDUCTION 89 -956.918187 -956.918215
## LO-REDUCTION 91 -956.918205 -956.918221
## Exiting from Nelder Mead minimizer
## 93 function evaluations used
out.egg
## $par
## [1] 482.4 432.9
##
## $value
## [1] -956.9
##
## $counts
## function gradient
## 93 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.egg <- optim(c(500,400), f.egg, method = "SANN", control = list(trace = TRUE))
## sann objective function values
## initial value -846.569207
## iter 1000 value -965.388229
## iter 2000 value -976.124930
## iter 3000 value -976.861171
## iter 4000 value -976.910951
## iter 5000 value -976.910951
## iter 6000 value -976.910951
## iter 7000 value -976.910951
## iter 8000 value -976.910951
42. 42 Optimization using optim() in R
## iter 9000 value -976.910951
## iter 9999 value -976.910951
## final value -976.910951
## sann stopped after 9999 iterations
out.egg
## $par
## [1] 522.1 413.3
##
## $value
## [1] -976.9
##
## $counts
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
Eggholder function
−400
−200
0
200
400
−400
−200
0
200
400
−500
0
500
1000
x1
x2
y
43. 1.16 Holder table function 43
1.16 Holder table function
########################################
# Holder table function
44. 44 Optimization using optim() in R
1.17 McCormick function
########################################
# McCormick function
45. 1.18 Schaffer function N. 2 45
1.18 Schaffer function N. 2
f(x, y) = 0.5 +
sin2
(x2−y2
)−0.5
(1+0.001(x2+y2))
2 .
########################################
# Schaffer function N. 2
# Yonghua
# * Schaffer function N. 2:
# :: <math>f(x,y) = 0.5 + frac{sin^{2}left(x^{2} - y^{2}right) - 0.5}{left(1 + 0.00
# :Minimum: <math>f(0, 0) = 0</math>, for <math>-100le x,y le 100</math>.
f.name <- "Schaffer function No.2"
# define the function
f.shaffer2 <- function(x) {
# make x a matrix so this function works for plotting and for optimizing
x <- matrix(x, ncol=2)
f.x <- x
f.x <- cbind(x, rep(0,nrow(x)))
# calculate the function value for each row of x
#for (ii in 1:nrow(x)) {
# f.x[ii,3] <- 0.5 + (sin((f.x[ii,1])^2+(f.x[ii,2])^2)-0.5)/(1+0.001*((f.x[ii,1])^2+
ret.val <- 0.5 + (sin((f.x[,1])^2+(f.x[,2])^2)-0.5)/(1+0.001*((f.x[,1])^2+(f.x[,2]
# return function value
return(ret.val)
}
# plot the function
# define ranges of x to plot over and put into matrix
x1 <- seq(-100, 100, length = 101)
x2 <- seq(-100, 100, length = 101)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
# evaluate function
y <- f.shaffer2(X)
#colnames(y) <- c("x1", "x2", "y")
46. 46 Optimization using optim() in R
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
library(lattice) # use the lattice package
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = -50, x = -70) # view position
)
# optimize (minimize) the function using Nelder-Mead
out.schaffer <- optim(c(100,100), f.shaffer2, method = "Nelder-Mead")
out.schaffer
## $par
## [1] 89.77 99.95
##
## $value
## [1] 0.4959
##
## $counts
## function gradient
## 85 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
# optimize (minimize) the function using Simulated Annealing
out.schaffer <- optim(c(100,100), f.shaffer2, method = "SANN")
out.schaffer
## $par
## [1] 90.6 102.6
##
## $value
## [1] 0.4961
##
## $counts
47. 1.18 Schaffer function N. 2 47
## function gradient
## 10000 NA
##
## $convergence
## [1] 0
##
## $message
## NULL
Schaffer function No.2
−100
−50
0
50
100−100
−50
0
50
100
−0.5
0.0
0.5
x1
x2
y
48. 48 Optimization using optim() in R
1.19 Schaffer function N. 4
########################################
# Schaffer function N. 4
49. 1.20 Styblinski-Tang function 49
1.20 Styblinski-Tang function
f(x) =
n
i=1 x4
i −16x2
i +5xi
2 .
########################################
# Styblinski-Tang function
# Kathy
# $f(boldsymbol{x}) = frac{sum_{i=1}^{n} x_{i}^{4} - 16x_{i}^{2} + 5x_{i}}{2}.quad$
f.name <- "Styblinski-Tang function"
f.styblinski <- function(x) {
# make x a matrix so this function works for plotting and for optimizing
x <- matrix(x, ncol=2)
# calculate the function value for each row of x
f.x <- (apply((x^4 - 16 * x^2 + 5 *x) , 1, sum))/2
# return function value
return(f.x)
}
# plot the function
# define ranges of x to plot over and put into matrix
x1 <- seq(-4.9, 5, length = 101)
x2 <- seq(-4.9, 5, length = 101)
X <- as.matrix(expand.grid(x1, x2))
colnames(X) <- c("x1", "x2")
# evaluate function
y <- f.styblinski(X)
# put X and y values in a data.frame for plotting
df <- data.frame(X, y)
# plot the function
library(lattice) # use the lattice package
wireframe(y ~ x1 * x2 # y, x1, and x2 axes to plot
, data = df # data.frame with values to plot
, main = f.name # name the plot
, shade = TRUE # make it pretty
, scales = list(arrows = FALSE) # include axis ticks
, screen = list(z = 50, x = -70) # view position
)
50. 50 Optimization using optim() in R
Styblinski−Tang function
−4
−2
0
2
4
−4
−2
0
2
4
−50
0
50
100
150
200
250
x1
x2
y