git-svn-id: https://svn.d4science.research-infrastructures.eu/gcube/trunk/data-analysis/wps@148306 82a268e6-3cf1-43bd-a215-b396298e98cf
This commit is contained in:
parent 34b8e85ffe
commit 52d39f1d93
@@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" output="target/classes" path="src/main/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry excluding="**" kind="src" output="target/classes" path="src/main/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="src" output="target/test-classes" path="src/test/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry excluding="**" kind="src" output="target/test-classes" path="src/test/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="output" path="target/classes"/>
</classpath>
@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>wps</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.m2e.core.maven2Builder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
<nature>org.eclipse.m2e.core.maven2Nature</nature>
</natures>
</projectDescription>
@@ -0,0 +1,5 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
org.eclipse.jdt.core.compiler.compliance=1.8
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.source=1.8
@@ -0,0 +1,4 @@
activeProfiles=
eclipse.preferences.version=1
resolveWorkspaceProjects=true
version=1
@@ -0,0 +1,4 @@
gCube System - License
------------------------------------------------------------

${gcube.license}
@@ -0,0 +1,66 @@
The gCube System - ${name}
--------------------------------------------------

${description}


${gcube.description}

${gcube.funding}


Version
--------------------------------------------------

${version} (${buildDate})

Please see the file named "changelog.xml" in this directory for the release notes.


Authors
--------------------------------------------------

* Lucio Lelii (lucio.lelii@isti.cnr.it), CNR, Italy


Maintainers
-----------

* Lucio Lelii (lucio.lelii@isti.cnr.it), CNR, Italy


Download information
--------------------------------------------------

Source code is available from SVN:
${scm.url}

Binaries can be downloaded from the gCube website:
${gcube.website}


Installation
--------------------------------------------------

Installation documentation is available on-line in the gCube Wiki:
${gcube.wikiRoot}/RConnector


Documentation
--------------------------------------------------

Documentation is available on-line in the gCube Wiki:
${gcube.wikiRoot}/RConnector


Support
--------------------------------------------------

Bugs and support requests can be reported in the gCube issue tracking tool:
${gcube.issueTracking}


Licensing
--------------------------------------------------

This software is licensed under the terms you may find in the file named "LICENSE" in this directory.
@@ -0,0 +1,26 @@
<ReleaseNotes>
<Changeset component="org.gcube.data-analysis.r-connector.2.1.2" date="2017-03-27">
<Change>add-user script is executed also when configfile.csv is not present</Change>
</Changeset>
<Changeset component="org.gcube.data-analysis.r-connector.2.1.2" date="2017-02-22">
<Change>porting to tabular-data-cl 2.0.0</Change>
</Changeset>
<Changeset component="org.gcube.data-analysis.r-connector.2.1.0" date="2016-10-03">
<Change>porting to smartgears 2.0</Change>
</Changeset>
<Changeset component="org.gcube.data-analysis.r-connector.2.0.1" date="2016-05-13">
<Change>algorithm for cookie encryption modified to support RStudio server 0.99</Change>
</Changeset>
<Changeset component="org.gcube.data-analysis.r-connector.2.0.0" date="2016-03-15">
<Change>connect method with empty parameters added</Change>
<Change>Authorization Framework integration</Change>
</Changeset>
<Changeset component="org.gcube.data-analysis.r-connector.1-0-1"
date="2015-05-20">
<Change>Bug fixing</Change>
</Changeset>
<Changeset component="org.gcube.data-analysis.r-connector.1-0-0"
date="2014-11-02">
<Change>First Release</Change>
</Changeset>
</ReleaseNotes>
@@ -0,0 +1,32 @@
<assembly
xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
<id>servicearchive</id>
<formats>
<format>tar.gz</format>
</formats>
<baseDirectory>/</baseDirectory>
<fileSets>
<fileSet>
<directory>${distroDirectory}</directory>
<outputDirectory>/</outputDirectory>
<useDefaultExcludes>true</useDefaultExcludes>
<includes>
<include>README</include>
<include>LICENSE</include>
<include>changelog.xml</include>
<include>profile.xml</include>
</includes>
<fileMode>755</fileMode>
<filtered>true</filtered>
</fileSet>
</fileSets>
<files>
<file>
<source>target/${build.finalName}.${project.packaging}</source>
<outputDirectory>/${artifactId}</outputDirectory>
</file>

</files>
</assembly>
@@ -0,0 +1,6 @@
<application mode="online">
<name>DataMiner</name>
<group>WPS</group>
<version>1.0</version>
<description>A service implementing a WPS provider in the D4Science e-Infrastructure</description>
</application>
@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>
<Resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ID />
<Type>Service</Type>
<Profile>
<Description>${description}</Description>
<Class>DataAnalysis</Class>
<Name>${artifactId}</Name>
<Version>1.0.0</Version>
<Packages>
<Software>
<Name>${artifactId}</Name>
<Version>${version}</Version>
<MavenCoordinates>
<groupId>${groupId}</groupId>
<artifactId>${artifactId}</artifactId>
<version>${version}</version>
</MavenCoordinates>
<Files>
<File>${build.finalName}.jar</File>
</Files>
</Software>
</Packages>
</Profile>
</Resource>

@@ -0,0 +1,157 @@
<?xml version="1.0" encoding="utf-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/j2ee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/j2ee http://java.sun.com/xml/ns/j2ee/web-app_2_4.xsd"
version="2.4">
<display-name>52°North Web Processing Service, Git: 1665e1b7b2188755161d4f0f3a6acf562d0444e1 @ 2015-03-21 00:30:20</display-name>
<description>A web processing framework supporting the OGC WPS 1.0.0 specification</description>

<!-- security-constraint>
<web-resource-collection>
<web-resource-name>My JSP</web-resource-name>
<url-pattern>/webAdmin/index.jsp</url-pattern>
<http-method>GET</http-method>
<http-method>POST</http-method>
</web-resource-collection>
<auth-constraint>
<role-name>wpsAdmin</role-name>
</auth-constraint>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint-->
<!-- /security-constraint>

<login-config>
<auth-method>BASIC</auth-method>
<realm-name>Basic Authentication</realm-name>
</login-config>

<Security roles referenced by this web application -->
<!-- >security-role>
<description>The role that is required to log in to the Manager Application</description>
<role-name>manager</role-name>
</security-role-->

<!--filter>
<filter-name>CommunicationSizeLogFilter</filter-name>
<filter-class>org.n52.wps.server.CommunicationSizeLogFilter</filter-class>
</filter-->
<!--filter>
<filter-name>CompressingFilter</filter-name>
<filter-class>com.planetj.servlet.filter.compression.CompressingFilter</filter-class>
<init-param>
<param-name>debug</param-name>
<param-value>false</param-value>
</init-param>
<init-param>
<param-name>statsEnabled</param-name>
<param-value>true</param-value>
</init-param>
</filter>

<filter-mapping>
<filter-name>CompressingFilter</filter-name>
<url-pattern>/WebProcessingService</url-pattern>
</filter-mapping-->
<!-->filter-mapping>
<filter-name>CommunicationSizeLogFilter</filter-name>
<url-pattern>/WebProcessingService</url-pattern>
</filter-mapping-->

<!-- <filter>
<filter-name>ResponseURLFilter</filter-name>
<filter-class>org.n52.wps.server.ResponseURLFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>ResponseURLFilter</filter-name>
<url-pattern>*</url-pattern>
</filter-mapping>-->

<servlet>
<servlet-name>wpsServlet</servlet-name>
<servlet-class>org.gcube.dataanalysis.wps.statisticalmanager.synchserver.web.WebProcessingService</servlet-class>
<!--<servlet-class>org.n52.wps.server.WebProcessingService</servlet-class>-->
<load-on-startup>0</load-on-startup>
<init-param>
<param-name>wps.config.file</param-name>
<param-value>config/wps_config.xml</param-value>
</init-param>
</servlet>
<servlet>
<servlet-name>retrieveResultServlet</servlet-name>
<servlet-class>org.n52.wps.server.RetrieveResultServlet</servlet-class>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>wpsServlet</servlet-name>
<url-pattern>/WebProcessingService</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>retrieveResultServlet</servlet-name>
<url-pattern>/RetrieveResultServlet</url-pattern>
</servlet-mapping>
<welcome-file-list>
<welcome-file>/index.html</welcome-file>
</welcome-file-list>

<!-- 52n Security -->
<context-param>
<param-name>security.config.validation</param-name>
<param-value>false</param-value>
<!--description>
disables validation of the security-config.xml this is necessary
because the MavenProject: org.n52.wps:52n-wps-webapp:3.3.0-SNAPSHOT @ D:\dev\GitHub4w\WPS\52n-wps-webapp\pom.xml mechanism works only if the validation is disabled.
</description-->
</context-param>

<filter>
<filter-name>CORS</filter-name>
<filter-class>com.thetransactioncompany.cors.CORSFilter</filter-class>
<init-param>
<param-name>cors.allowOrigin</param-name>
<param-value>*</param-value>
</init-param>
<init-param>
<param-name>cors.allowGenericHttpRequests</param-name>
<param-value>true</param-value>
</init-param>
<init-param>
<param-name>cors.supportedMethods</param-name>
<param-value>GET, POST, HEAD, PUT, DELETE, OPTIONS</param-value>
</init-param>
<init-param>
<param-name>cors.supportedHeaders</param-name>
<param-value>*</param-value>
</init-param>
<init-param>
<param-name>cors.exposedHeaders</param-name>
<param-value>*</param-value>
</init-param>
</filter>
<filter-mapping>
<filter-name>CORS</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>

<!--
<filter>
<filter-name>authn</filter-name> -->
<!-- display-name>Authentication Chain Filter</display-name-->
<!-- description>
Delegates calls to AuthenticationChainFilter that is defined in the security-config.
</description-->
<!-- <filter-class>org.n52.security.service.config.support.SecurityConfigDelegatingServletFilter</filter-class>
</filter>

<filter-mapping>
<filter-name>authn</filter-name>
<url-pattern>/webAdmin/*</url-pattern>
</filter-mapping>


<listener>
<listener-class>org.n52.security.service.config.support.SecurityConfigContextListener</listener-class>
</listener>

-->

</web-app>
@@ -0,0 +1,725 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.gcube.data-analysis</groupId>
<artifactId>wps</artifactId>
<version>1.0.0-SNAPSHOT</version>
<name>WPS</name>

<packaging>war</packaging>

<properties>
<webappDirectory>${project.basedir}/src/main/webapp/WEB-INF</webappDirectory>
<distroDirectory>${project.basedir}/distro</distroDirectory>
<securityversion52n>2.2-M2</securityversion52n>
<geotools.version>8.7</geotools.version>
<apache.ode.version>1.3.3</apache.ode.version>
<apache.axis2.version>1.5.1</apache.axis2.version>
<n52version>3.6.1</n52version>
</properties>

<repositories>
<repository>
<id>n52-releases</id>
<name>52n Releases</name>
<url>http://52north.org/maven/repo/releases</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
<repository>
<id>Apache</id>
<name>Apache repository</name>
<url>http://repo1.maven.org/maven2</url>
</repository>
<repository>
<id>geotools</id>
<name>Geotools Repo</name>
<url>http://download.osgeo.org/webdav/geotools</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</repository>
<repository>
<id>OpenGEO</id>
<name>opengeo repository</name>
<url>http://repo.opengeo.org</url>
</repository>
</repositories>

<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.gcube.distribution</groupId>
<artifactId>maven-smartgears-bom</artifactId>
<version>LATEST</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>

<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>jcl-over-slf4j</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>org.gcube.dataanalysis</groupId>
<artifactId>dataminer</artifactId>
<version>[1.4.0-SNAPSHOT, 2.0.0-SNAPSHOT)</version>
<exclusions>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>geotools</artifactId>
<version>${geotools.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-main</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-arcgrid</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools.xsd</groupId>
<artifactId>gt-xsd-gml3</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-coverage</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-referencing</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools.xsd</groupId>
<artifactId>gt-xsd-core</artifactId>
<version>${geotools.version}</version>
<exclusions>
<exclusion>
<artifactId>xml-apis-xerces</artifactId>
<groupId>xml-apis</groupId>
</exclusion>
<exclusion>
<artifactId>xml-apis</artifactId>
<groupId>xml-apis</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.geotools.xsd</groupId>
<artifactId>gt-xsd-kml</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-xml</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-geotiff</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-epsg-hsql</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-shapefile</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-opengis</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-metadata</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools.xsd</groupId>
<artifactId>gt-xsd-gml2</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-api</artifactId>
<version>${geotools.version}</version>
</dependency>
<dependency>
<groupId>org.geotools</groupId>
<artifactId>gt-geojson</artifactId>
<version>${geotools.version}</version>
</dependency>
<!-- END import geotools dependencies -->
<!-- START J2EE -->
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>2.5</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
<scope>provided</scope>
<version>2.1</version>
</dependency>
<!-- END J2EE -->
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>20.0</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>3.2</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>3.1</version>
<exclusions>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<!--<version>1.3.1</version> -->
<version>2.0</version>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
<version>2.5</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.2.1</version>
<scope>compile</scope>
<exclusions>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>4.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.derby</groupId>
<artifactId>derby</artifactId>
<version>10.2.2.0</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.abdera</groupId>
<artifactId>abdera-bundle</artifactId>
<version>1.1.2</version>
<exclusions>
<exclusion>
<artifactId>xmlsec</artifactId>
<groupId>xml-security</groupId>
</exclusion>
<exclusion>
<artifactId>geronimo-activation_1.0.2_spec</artifactId>
<groupId>org.apache.geronimo.specs</groupId>
</exclusion>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-exec</artifactId>
<version>1.3</version>
</dependency>
<dependency>
<groupId>org.apache.neethi</groupId>
<artifactId>neethi</artifactId>
<version>2.0.4</version>
<exclusions>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.woden</groupId>
<artifactId>woden-api</artifactId>
<version>1.0M8</version>
</dependency>
<dependency>
<groupId>org.apache.ws.security</groupId>
<artifactId>wss4j</artifactId>
<version>1.5.6</version>
<exclusions>
<exclusion>
<artifactId>bcprov-jdk14</artifactId>
<groupId>bouncycastle</groupId>
</exclusion>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.codehaus.woodstox</groupId>
<artifactId>wstx-asl</artifactId>
<version>4.0.6</version>
<exclusions>
<exclusion>
<artifactId>stax-api</artifactId>
<groupId>stax</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.codehaus.woodstox</groupId>
<artifactId>woodstox-core-asl</artifactId>
<version>4.0.6</version>
<exclusions>
<exclusion>
<artifactId>stax-api</artifactId>
<groupId>stax</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>xalan</groupId>
<artifactId>xalan</artifactId>
<version>2.7.1</version>
<exclusions>
<exclusion>
<artifactId>xml-apis</artifactId>
<groupId>xml-apis</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>xerces</groupId>
<artifactId>xercesImpl</artifactId>
<version>2.7.1</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.xmlbeans</groupId>
<artifactId>xmlbeans</artifactId>
<version>2.4.0</version>
<exclusions>
<exclusion>
<artifactId>stax-api</artifactId>
<groupId>stax</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
<version>2.0</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jsr173_api</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>stax</groupId>
<artifactId>stax-api</artifactId>
<version>1.0.1</version>
</dependency>
<dependency>
<groupId>net.java.dev.stax-utils</groupId>
<artifactId>stax-utils</artifactId>
<version>20060502</version>
<exclusions>
<exclusion>
<artifactId>jsr173-ri</artifactId>
<groupId>com.bea.xml</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.n52.mc</groupId>
<artifactId>mc-runtime</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>org.n52.mc</groupId>
<artifactId>mc-schema</artifactId>
<version>1.1</version>
</dependency>


<dependency>
<groupId>net.sourceforge.pjl-comp-filter</groupId>
<artifactId>pjl-comp-filter</artifactId>
<scope>runtime</scope>
<version>1.6.4</version>
<exclusions>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>joda-time</groupId>
<artifactId>joda-time</artifactId>
<version>2.2</version>
</dependency>
<dependency>
<groupId>edu.umn.gis</groupId>
<artifactId>mapscript</artifactId>
<version>6.0.3</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-io-geotools</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-algorithm-geotools</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-commons</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-commons</artifactId>
<classifier>tests</classifier>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-io</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-io</artifactId>
<classifier>tests</classifier>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-io-impl</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-server</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-algorithm</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-algorithm-impl</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-database</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-client-lib</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-sextante</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-grass</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-webadmin</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-ags</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-r</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-mc</artifactId>
<version>${n52version}</version>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-config</artifactId>
<version>1.2.1</version>
<exclusions>
<exclusion>
<groupId>org.n52.wps</groupId>
<artifactId>52n-xml-wps-v100</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.n52.wps</groupId>
<artifactId>52n-wps-ags-workspace</artifactId>
<version>10.0.2</version>
</dependency>
<dependency>
<groupId>org.n52.sensorweb</groupId>
<artifactId>52n-xml-wps-v100</artifactId>
<version>2.1.0</version>
</dependency>
<dependency>
<groupId>org.n52.security</groupId>
<artifactId>52n-security-config</artifactId>
<version>2.2-M2</version>
<scope>compile</scope>
<exclusions>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.n52.security</groupId>
<artifactId>52n-security-authentication</artifactId>
<version>2.2-M2</version>
<scope>compile</scope>
<exclusions>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.n52.security</groupId>
<artifactId>52n-security-authn-web</artifactId>
<version>2.2-M2</version>
<scope>compile</scope>
<exclusions>
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- START Sextante dependencies -->
<dependency>
<groupId>es.unex.sextante</groupId>
<artifactId>sextante</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>es.unex.sextante</groupId>
<artifactId>sextante_gui</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>es.unex.sextante</groupId>
<artifactId>sextante_algorithms</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>es.unex.sextante</groupId>
<artifactId>libMath</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>es.unex.sextante</groupId>
<artifactId>sextante_gt27_bindings</artifactId>
<version>1.0.1</version>
</dependency>
<dependency>
<groupId>org.beanshell</groupId>
<artifactId>bsh</artifactId>
<version>2.0b4</version>
</dependency>
<dependency>
<groupId>jfree</groupId>
<artifactId>jfreechart</artifactId>
<version>1.0.13</version>
</dependency>
<dependency>
<groupId>trove</groupId>
<artifactId>trove</artifactId>
<version>1.0.2</version>
</dependency>
<dependency>
<groupId>net.sf.kxml</groupId>
<artifactId>kxml2</artifactId>
<version>2.2.2</version>
</dependency>
<dependency>
<groupId>jep</groupId>
<artifactId>jep</artifactId>
<version>2.24</version>
</dependency>
<!-- END Sextante dependencies -->
<dependency>
<groupId>org.apache.ode</groupId>
<artifactId>ode-axis2</artifactId>
<version>${apache.ode.version}</version>
</dependency>
<dependency>
<groupId>org.apache.ode</groupId>
<artifactId>ode-utils</artifactId>
<version>${apache.ode.version}</version>
</dependency>
<!-- START R dependencies -->
<dependency>
<groupId>org.rosuda.REngine</groupId>
<artifactId>Rserve</artifactId>
<version>1.8.1</version>
</dependency>
<!-- <dependency> -->
<!-- <groupId>org.rosuda</groupId> -->
<!-- <artifactId>RserveEngine</artifactId> -->
<!-- <version>0.6-8</version> -->
<!-- </dependency> -->
<!-- END R dependencies -->
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>jstl</artifactId>
<version>1.2</version>
</dependency>
<dependency>
<groupId>com.thetransactioncompany</groupId>
<artifactId>cors-filter</artifactId>
<version>1.9.3</version>
</dependency>
</dependencies>

<build>
<finalName>${artifactId}</finalName>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>2.3.2</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-war-plugin</artifactId>
<version>2.4</version>
<configuration>
<warName>wps</warName>
<failOnMissingWebXml>false</failOnMissingWebXml>
<includeEmptyDirectories>true</includeEmptyDirectories> <!-- since 2.4 -->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<version>2.6</version>
<executions>
<execution>
<id>copy-profile</id>
<goals>
<goal>copy-resources</goal>
</goals>
<phase>process-resources</phase>
<configuration>
<outputDirectory>${webappDirectory}</outputDirectory>
<resources>
<resource>
<directory>${distroDirectory}</directory>
<filtering>true</filtering>
</resource>
</resources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>2.2</version>
<configuration>
<descriptors>
<descriptor>${distroDirectory}/descriptor.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>servicearchive</id>
<phase>install</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>


</project>
@@ -0,0 +1,10 @@
# WPS4R Webapp directory

WPS4R relies heavily on scripts and configuration files that are stored in the webapp directory, so that they remain accessible and changeable in a deployed WPS.
The sub-folders and their contents are as follows (a short annotated-script sketch follows this list):

* ``/demo``: Browser demonstration clients (HTML, Javascript and image files).
* ``/R_Datatype.conf``: Configuration of data types to be used in R script annotations.
* ``/resources``: Resource files for the scripts. If a script defines names of resource files from this directory, then these files are loaded into the R workspace before script execution.
* ``/scripts``: The WPS4R script repository; see the separate README.md file.
* ``/utils``: R scripts that are loaded into the R session; they may contain configuration, session-wide utility functions, or session variables.
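To make the resource mechanism concrete, here is a minimal sketch of a WPS4R-annotated R script. Only the annotation keys themselves (``wps.des``, ``wps.res``, ``wps.in``, ``wps.out``) come from the WPS4R documentation; the process id, file name, and variable names are hypothetical.

```r
# Hypothetical WPS4R demo script; all identifiers below are illustrative.
# wps.des: demo.summary, title = CSV summary demo;
# wps.res: demo-data.csv;
# wps.in: column, string;
# wps.out: result, string;

# Assumption: WPS4R copies demo-data.csv from /resources into the R
# workspace before the script body runs, so a relative path suffices.
data <- read.csv("demo-data.csv")
result <- paste(summary(data[[column]]), collapse = "; ")
```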
@@ -0,0 +1,12 @@
# comma-separated list for each mimetype
# <annotation key>, <mimetype>, <format hint for input and output (e.g. file, textfile, zip)>
rdata, application/rData, file
rdata+Spatial, application/rData+Spatial, file
rdata+SpatialPoints, application/rData+SpatialPoints, file
rdata+SpatialPolygons, application/rData+SpatialPolygons, file
text/html, text/html, file
html, text/html, file
json, application/json, file
application/json, application/json, file
csv, text/csv, file
text/csv, text/csv, file
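As a usage sketch: a script annotation names a key from the first column, and WPS4R resolves the mimetype and format hint from the matching row. The identifiers below are made up, and the convention that the output variable holds the name of the produced file is an assumption taken from the WPS4R examples.

```r
# Sketch only: "json" is resolved via R_Datatype.conf to application/json,
# format hint "file"; the variable name is illustrative.
# wps.out: stats, json;

stats <- "stats-output.json"               # assumed: output variable holds the file name
writeLines('{"observations": 42}', stats)  # the script writes the declared file
```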
@@ -0,0 +1,122 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>WPS4R - Image Rendering Demo</title>

<link href="http://52north.org/templates/52n/favicon.ico"
rel="shortcut icon" type="image/x-icon" />
<meta name="viewport" content="width=device-width, initial-scale=1" />

<link rel="stylesheet"
href="http://code.jquery.com/mobile/1.1.0/jquery.mobile-1.1.0.min.css" />

<script type="text/javascript"
src="http://code.jquery.com/jquery-1.7.1.min.js"></script>
<script type="text/javascript"
src="http://code.jquery.com/mobile/1.1.0/jquery.mobile-1.1.0.min.js"></script>

<script type="text/javascript" src="imageRendering.js"></script>
<script type="text/javascript" src="wps-client-common.js"></script>

<link rel="stylesheet" href="styles.css" type="text/css" />

</head>
<body>
<div data-role="page" id="home" data-add-back-btn="true">

<div data-role="header">
<h1>Live Time Series Plots</h1>
</div>

<div data-role="content">
<!-- http://www.w3.org/wiki/HTML/Elements/input/range -->
<p>
<label for="slider-hours">Select the number of hours to view:</label>
<input type="range" name="slider_hrs" id="slider-hours" value="12"
min="1" max="72" />
</p>
<p>
<label for="slider-loess-span">Set the <a
href="http://www.inside-r.org/r-doc/stats/loess">LOESS span</a>
parameter:
</label> <input type="range" name="slider_span" id="slider-loess-span"
value="0.75" min="0" max="3" step="0.05" />
</p>

<button name="submit" id="executeRequest" value="execute-request">Create
time series plot</button>

<div id="plot"></div>
<div id="resultLog"></div>

<p class="footer">
These plots are powered by 52°North using the WPS4R
processing backend at <span id="serviceUrl"></span>.
</p>
</div>

<div data-role="footer" data-position="fixed">
<a href="#about" data-role="button" data-icon="info">About</a>
</div>
</div>
<!-- /start page -->

<div data-role="page" id="about" data-add-back-btn="true">
<div data-role="header">
<h1>52°North WPS4R</h1>
</div>

<div data-role="content">
<h2>Documentation</h2>
<p>The 52°North Web Processing Service enables the deployment
of geo-processes on the web in a standardized way. It features a
pluggable architecture for processes and data encodings. The
implementation is based on the current OpenGIS specification:
05-007r7.</p>
<p>
<em>WPS4R</em> extends the 52°North WPS with a processing
backend for the <a href="http://www.r-project.org/">R</a>
environment for statistical computing and graphics. This makes it
possible to expose any R script via a standardised service interface.
</p>
<ul>
<li>WPS Specification: <a
href="http://opengeospatial.org/standards/wps">OGC website</a>.
</li>
<li>52°North WPS Implementation: <a
href="http://www.52north.org/wps">52N Geoprocessing Community
website</a>.
<li>WPS4R: See <a
href="http://52north.org/communities/geoprocessing/wps/backends/52n-wps-r.html">Processing
Backend</a> and <a
href="https://wiki.52north.org/bin/view/Geostatistics/WPS4R">Wiki
Page</a>.
</li>
</ul>

<p>
Go to the <a href="../../index.html">WPS server welcome page</a> to
find out more.
</p>

<h2>Test Requests</h2>
<ul>
<li><a id="link_processdescription" href="http://localhost"
title="Open process description" target="_blank">Process
Description Request</a></li>
<li><a
href="../../WebProcessingService?Request=GetCapabilities&Service=WPS"
title="Open capabilities document" target="_blank">GetCapabilities
Request</a></li>
</ul>
</div>

<div data-role="footer" data-position="fixed">
<a href="#home" data-role="button" data-icon="home">Home</a>
</div>
</div>
<!-- /about page -->

</body>
</html>
Binary file not shown.
@@ -0,0 +1,172 @@

var offering = 'WASSERSTAND_ROHDATEN';
var stationname = 'Bake';
var processIdentifier = 'org.n52.wps.server.r.demo.timeseriesPlot';
var outputIdentifier = 'timeseries_plot';

var requestPlot = function(requestedHours, requestedOffering, paramLoessSpan, requestedStationname) {
var imageWidth = '700';
var imageHeight = '500';
var sosUrl = 'http://sensorweb.demo.52north.org/PegelOnlineSOSv2.1/sos';

var requestString = '<?xml version="1.0" encoding="UTF-8"?><wps:Execute service="WPS" version="1.0.0" xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd">'
+ '<ows:Identifier>'
+ processIdentifier
+ '</ows:Identifier>'
+ '<wps:DataInputs>'
+ '<wps:Input><ows:Identifier>offering_hours</ows:Identifier>'
+ '<ows:Title></ows:Title>'
+ '<wps:Data>'
+ '<wps:LiteralData>'
+ requestedHours
+ '</wps:LiteralData></wps:Data>'
+ '</wps:Input>'
+ '<wps:Input><ows:Identifier>sos_url</ows:Identifier>'
+ '<ows:Title></ows:Title>'
+ '<wps:Data>'
+ '<wps:LiteralData>'
+ sosUrl
+ '</wps:LiteralData></wps:Data>'
+ '</wps:Input>'
+ '<wps:Input>'
+ '<ows:Identifier>offering_id</ows:Identifier>'
+ '<ows:Title></ows:Title>'
+ '<wps:Data>'
+ '<wps:LiteralData>'
+ requestedOffering
+ '</wps:LiteralData>'
+ '</wps:Data>'
+ '</wps:Input>'
+ '<wps:Input>'
+ '<ows:Identifier>offering_stationname</ows:Identifier>'
+ '<ows:Title></ows:Title>'
+ '<wps:Data>'
+ '<wps:LiteralData>'
+ requestedStationname
+ '</wps:LiteralData>'
+ '</wps:Data>'
+ '</wps:Input>'
+ '<wps:Input>'
+ '<ows:Identifier>loess_span</ows:Identifier>'
+ '<ows:Title></ows:Title>'
+ '<wps:Data>'
+ '<wps:LiteralData>'
+ paramLoessSpan
+ '</wps:LiteralData>'
+ '</wps:Data>'
+ '</wps:Input>'
+ '<wps:Input>'
+ '<ows:Identifier>image_width</ows:Identifier>'
+ '<ows:Title></ows:Title>'
+ '<wps:Data>'
+ '<wps:LiteralData>'
+ imageWidth
+ '</wps:LiteralData>'
+ ' </wps:Data>'
+ '</wps:Input>'
+ '<wps:Input>'
+ '<ows:Identifier>image_height</ows:Identifier>'
+ '<ows:Title></ows:Title>'
+ '<wps:Data>'
+ '<wps:LiteralData>'
+ imageHeight
+ '</wps:LiteralData>'
+ '</wps:Data>'
+ '</wps:Input>'
+ '</wps:DataInputs>'
+ '<wps:ResponseForm>'
+ '<wps:ResponseDocument>'
+ '<wps:Output asReference="true">'
//+ '<wps:Output asReference="false">'
+ '<ows:Identifier>'
+ outputIdentifier
+ '</ows:Identifier>'
+ '</wps:Output>'
+ '</wps:ResponseDocument>'
+ '</wps:ResponseForm>'
+ '</wps:Execute>';

var requestXML = $.parseXML(requestString);
var xmlstr = requestXML.xml ? requestXML.xml : (new XMLSerializer())
.serializeToString(requestXML);

$("#resultLog").html(
"<div class=\"info\">Sent request to " + serviceUrlString
+ " :<br /><textarea>" + xmlstr + "</textarea></div>");

$.ajax({
type : "POST",
url : serviceUrlString, // "http://localhost:8080/wps/WebProcessingService",
data : {
request : xmlstr
},
cache : false,
dataType : "xml",
success : handleResponse
});

};

var showResponse = function(executeResponse) {
var status = $(executeResponse).find("wps\\:Status");
var statusText = $(status).find("wps\\:ProcessSucceeded").text();
$("#resultLog").html("<div class=\"success\">" + statusText + "</div>");

$(executeResponse)
.find("wps\\:Output")
.each(
function() {
// check if the output is the desired image
if ($(this).find("ows\\:Identifier").text() == outputIdentifier) {
// alert("Found: " + outputIdentifier);

var title = $(this).find("ows\\:Title").text();

$(this).find("wps\\:Reference").each(
function() {

var link = $(this).attr("href");
// var mime_type = $(this)
// .attr("mimeType");

if (beginsWith(link, "http://")) {
$("#plot").html(
"<img src='" + link
+ "' alt='" + title
+ "' />");
}

$("#resultLog").append(
"<div class=\"info\">" + link
+ "</div>");
});
}
});
};

$(function() {

$("#executeRequest").click(function() {
$("#plot").html("<!-- no data -->");

var hours = $("#slider-hours").val();
var span = $("#slider-loess-span").val();

$("#resultLog").html("Hours: " + hours + " | Offering: " + offering + " | LOESS span: " + span);

requestPlot(hours, offering, span, stationname);
});

$("#resultLog").ajaxError(
function(event, request, settings, exception) {
$("#resultLog").html(
"<div class=\"warning\">Error Calling: " + settings.url
+ "<br />HTTP Code: " + request.status
+ "<br />Exception: " + exception + "</div>");
});
});

$(document).ready(function(){
$("#link_processdescription").attr("href", "../../WebProcessingService?Request=DescribeProcess&Service=WPS&version=1.0.0&Identifier=" + processIdentifier);
//alert($("#link_processdescription").attr("href"));
});
Binary file not shown.
@@ -0,0 +1,48 @@
/* Using Knob Buttons: http://itweek.deviantart.com/art/Knob-Buttons-Toolbar-icons-73463960 */
/* message boxes based on http://www.jankoatwarpspeed.com/post/2008/05/22/CSS-Message-Boxes-for-different-message-types.aspx */
.info,.success,.warning,.error,.validation {
border: 1px solid;
margin: 10px 0px;
padding: 15px 10px 15px 50px;
background-repeat: no-repeat;
background-position: 10px center;
}

.info {
color: #00529B;
background-color: #BDE5F8;
background-image: url('info.png');
}

.success {
color: #4F8A10;
background-color: #DFF2BF;
background-image: url('success.png');
}

.warning {
color: #9F6000;
background-color: #FEEFB3;
background-image: url('warning.png');
}

.error {
color: #D8000C;
background-color: #FFBABA;
background-image: url('error.png');
}

.validation {
color: #D63301;
background-color: #FFCCBA;
background-image: url('validation.png');
}

.reporting-content .ui-slider-switch {
width: 15em;
}

.footer {
color: #aaaaaa;
font-size: 0.75em;
}
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,47 @@
var urlIndex = window.location.href.lastIndexOf("/R/");
var urlBasisString = window.location.href.substring(0, (urlIndex + 1));
var serviceUrlString = urlBasisString + "WebProcessingService";

var handleResponse = function(data) {
console.log("Got response: " + data);

var isError = $(data).find("ows\\:ExceptionReport").length > 0;
if (isError) {
console.log("ERROR response.");
showError(data);
} else {
showResponse(data);
}
};

var showError = function(error) {
// var xmlString = (new XMLSerializer()).serializeToString(error);
// alert(xmlString);

var messages = "";
$(error).find("ows\\:Exception").each(
function() {

var text = $(this).find("ows\\:ExceptionText").text();
var locator = $(this).attr("locator");

var errorMessage = "<p>Error: " + text + "<br />Locator: "
+ locator + "</p>\n";
messages = messages + errorMessage;
});

$("#resultLog").html("<div class=\"error\">" + messages + "</div>");
};

var beginsWith = function(string, pattern) {
return (string.indexOf(pattern) === 0);
};

var endsWith = function(string, pattern) {
var d = string.length - pattern.length;
return (d >= 0 && string.lastIndexOf(pattern) === d);
};

$(document).ready(function() {
$("#serviceUrl").html("<em>" + serviceUrlString + "</em>");
});
Binary file not shown.
@@ -0,0 +1,43 @@
\NeedsTeXFormat{LaTeX2e}
\ProvidesPackage{Sweave}{}

\RequirePackage{ifthen}
\newboolean{Sweave@gin}
\setboolean{Sweave@gin}{true}
\newboolean{Sweave@ae}
\setboolean{Sweave@ae}{true}

\DeclareOption{nogin}{\setboolean{Sweave@gin}{false}}
\DeclareOption{noae}{\setboolean{Sweave@ae}{false}}
\ProcessOptions

\RequirePackage{graphicx,fancyvrb}
\IfFileExists{upquote.sty}{\RequirePackage{upquote}}{}

\ifthenelse{\boolean{Sweave@gin}}{\setkeys{Gin}{width=0.8\textwidth}}{}%
\ifthenelse{\boolean{Sweave@ae}}{%
\RequirePackage[T1]{fontenc}
\RequirePackage{ae}
}{}%

\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl}
\DefineVerbatimEnvironment{Soutput}{Verbatim}{}
\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl}

\ifdefined\Schunk%
\message{\string Environment Schunk is already defined, stay with former definition}%
\else
\newenvironment{Schunk}{}{}%
\fi

\newcommand{\Sconcordance}[1]{%
\ifx\pdfoutput\undefined%
\csname newcount\endcsname\pdfoutput\fi%
\ifcase\pdfoutput\special{#1}%
\else%
\begingroup%
\pdfcompresslevel=0%
\immediate\pdfobj stream{#1}%
\pdfcatalog{/SweaveConcordance \the\pdflastobj\space 0 R}%
\endgroup%
\fi}
@ -0,0 +1,331 @@
|
|||
% Copyright (C) 2012< by 52°North Initiative for Geospatial Open Source Software GmbH, Contact: info@52north.org
|
||||
% This document is licensed under Creative Commons Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0), see http://creativecommons.org/licenses/by-sa/3.0/ for details.
|
||||
% Author: Daniel Nuest (d.nuest@52north.org)
|
||||
\documentclass{article}
|
||||
|
||||
% page margins: http://www.sharelatex.com/learn/Page_size_and_margins
|
||||
\usepackage[a4paper, margin=2cm]{geometry}
|
||||
|
||||
\usepackage{graphicx, verbatim}
|
||||
\usepackage[utf8]{inputenc}
|
||||
\usepackage{hyperref}
|
||||
\usepackage{soul} % for \hl
|
||||
\usepackage{pbox} % for new line in table
|
||||
% \usepackage[section]{placeins}
|
||||
% \usepackage{placeins} % for \FloatBarrier
|
||||
|
||||
% http://stackoverflow.com/questions/1673942/latex-table-positioning
|
||||
\usepackage{float}
|
||||
\restylefloat{table}
|
||||
|
||||
\usepackage{listings}
|
||||
\lstloadlanguages{XML,Java,R}
|
||||
|
||||
\usepackage{courier}
|
||||
\lstset{breaklines=true,basicstyle=\ttfamily}
|
||||
|
||||
\begin{document}
|
||||
\SweaveOpts{concordance=TRUE}
|
||||
|
||||
% overall graphics size
|
||||
\setkeys{Gin}{width=.5\linewidth}
|
||||
|
||||
% http://www.math.montana.edu/~jimrc/classes/Rseminar/SweaveIntro.html
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\title{Pegelonline Datenbericht}
|
||||
\author{52$^\circ$North}
|
||||
\maketitle
|
||||
|
||||
%\section{Abstract}
|
||||
|
||||
%Dieses Dokument demonstriert eine transparente Berichtgenerierung mit Hilfe von Open Source Softwarekomponenten von 52$^\circ$North. Diese Datei wurde auf Basis von live Daten in einem 52$^\circ$North Web Processing Service\footnote{\url{http://52north.org/communities/geoprocessing/wps/}} (WPS) mit Hilfe von WPS4R\footnote{\url{{http://52north.org/communities/geoprocessing/wps/backends/52n-wps-r.html}} auf der Basis von R\footnote{\url{http://r-project.org/}}, \LaTeX{}\footnote{\url{http://www.latex-project.org/}} und Sweave\footnote{\url{http://www.statistik.lmu.de/~leisch/Sweave/}} generiert.
|
||||
|
||||
<<label=libraries, echo=FALSE, results=hide>>=
|
||||
library("sos4R")
|
||||
library("latticeExtra")
|
||||
library("quantreg")
|
||||
library(maps); library(mapdata); library(maptools);
|
||||
library("sp"); library("rgdal")
|
||||
@
|
||||
|
||||
|
||||
<<label=inputs, echo=FALSE, results=hide>>=
|
||||
# can be inputs later, to be defined in the script file as wps inputs
|
||||
|
||||
if(exists("tPeriod_days")) {
|
||||
cat("[pegel] Found param tPeriod_days: ", tPeriod_days, "\n")
|
||||
} else {
|
||||
tPeriod_days <- 1
|
||||
}
|
||||
|
||||
if(exists("offering_name")) {
|
||||
cat("[pegel] Found param offering_name: ", offering_name, "\n")
|
||||
} else {
|
||||
offering_name <- "WASSERSTAND_ROHDATEN"
|
||||
}
|
||||
|
||||
if(exists("procedure_filter")) {
|
||||
cat("[pegel] Found param procedure_filter: ", procedure_filter, "\n")
|
||||
} else {
|
||||
# http://sensorweb.demo.52north.org/PegelOnlineSOSv2.1/sos?REQUEST=GetCapabilities&SERVICE=SOS
|
||||
procedure_filter <- "*Wasserstand-Bake*"
|
||||
#procedure_filter <- "*Papenburg*"
|
||||
}
|
||||
|
||||
if(exists("process_description_url")) {
|
||||
cat("[pegel] Found process description URL: ", process_description_url, "\n")
|
||||
} else {
|
||||
process_description_url <- "N/A"
|
||||
}
|
||||
|
||||
cat("[pegel] Input values:\n\tdays=", tPeriod_days, "\n\toffering=", offering_name, "\n\tprocedure filter=", procedure_filter, "\n")
|
||||
@
|
||||
|
||||
<<label=helpers, echo=FALSE, results=hide>>=
|
||||
cleanName <- function(obj) {
|
||||
.name <- toString(obj)
|
||||
.cleaned <- gsub("_", " ", .name)
|
||||
return(.cleaned)
|
||||
}
|
||||
# to check the output use cat(cleanName(sensors))
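# e.g. cat(cleanName("Wasserstand_Bake_A")) would print "Wasserstand Bake A" (hypothetical identifier; gsub replaces underscores with spaces)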
|
||||
@
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
<<label=metadata_service, echo=FALSE, results=hide>>=
|
||||
#pegelsos <- SOS(url = "http://pegelonline.wsv.de/webservices/gis/gdi-sos")
|
||||
|
||||
converters <- SosDataFieldConvertingFunctions(
|
||||
"WASSERSTAND_ROHDATEN" = sosConvertDouble,
|
||||
"LUFTTEMPERATUR" = sosConvertDouble,
|
||||
"Wasserstand" = sosConvertDouble,
|
||||
"m+NN" = sosConvertDouble)
|
||||
|
||||
pegelsos <- SOS(url = "http://sensorweb.demo.52north.org/PegelOnlineSOSv2.1/sos",
|
||||
dataFieldConverters = converters)
|
||||
|
||||
procs <- sosProcedures(pegelsos)[[offering_name]]
|
||||
|
||||
if(exists("procedure_filter")) {
|
||||
filter <- procs %in% grep(procedure_filter, procs, value=TRUE)
|
||||
sensors <- subset(procs, filter)
|
||||
} else {
|
||||
sensors <- procs
|
||||
}
|
||||
cat("[pegel] sensors: ", toString(sensors), "\n")
|
||||
|
||||
|
||||
offering <- sosOfferings(pegelsos)[[offering_name]]
|
||||
observed_property <- sosObservedProperties(offering)[1]
|
||||
|
||||
# just use the first one
|
||||
sensor <- sensors[1]
|
||||
cat("[pegel] continuing with procedure: ", toString(sensor), "\n")
|
||||
sensor.sml <- describeSensor(sos = pegelsos, procedure = sensor)
|
||||
@
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Pegelanalyse für Messstation(en) \Sexpr{cleanName(sensor)}}
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{Metadaten}
|
||||
|
||||
% http://en.wikibooks.org/wiki/LaTeX/Tables
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\begin{tabular}{l|l}
|
||||
|
||||
\hline
|
||||
Service Title & \Sexpr{sosTitle(pegelsos)} \\
|
||||
Service Abstract & \Sexpr{sosAbstract(pegelsos)} \\
|
||||
Service Version & \Sexpr{sosVersion(pegelsos)} \\
|
||||
Service URL & \Sexpr{sosUrl(pegelsos)} \\
|
||||
Keywords & \Sexpr{cleanName(toString(pegelsos@capabilities@identification@keywords$Keywords.Keyword))} \\
|
||||
& \href{\Sexpr{sosCapabilitiesUrl(pegelsos)}}{Capabilities} \\
|
||||
\hline
|
||||
Provider Name & \Sexpr{sosServiceProvider(pegelsos)@providerName} \\
|
||||
Provider Site & \Sexpr{sosServiceProvider(pegelsos)@providerSite} \\
|
||||
\hline
|
||||
Offering ID & \Sexpr{cleanName(sosId(offering))} \\
|
||||
Offering Name & \Sexpr{sosName(offering)} \\
|
||||
Observed Property & \Sexpr{cleanName(observed_property)} \\
|
||||
\hline
|
||||
|
||||
\end{tabular}
|
||||
\caption{Service Metadaten}
|
||||
\label{tab:service_metadata}
|
||||
\end{table}
|
||||
% \FloatBarrier
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{Messstation}
|
||||
|
||||
Der Sensor \Sexpr{cleanName(sensor)} befindet sich an den Koordinaten $\Sexpr{sosCoordinates(sensor.sml)}$. Die wichtigsten Metadaten der Sensorbeschreibung sind in der folgenden Tabelle zusammengefasst.
|
||||
|
||||
%<<station,echo=TRUE,results=verbatim>>=
|
||||
%sensor.sml
|
||||
%@
|
||||
|
||||
% http://en.wikibooks.org/wiki/LaTeX/Tables
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\begin{tabular}{l|l}
|
||||
|
||||
\hline
|
||||
Station ID & \Sexpr{cleanName(sosId(sensor.sml))} \\
|
||||
Station Name & \Sexpr{cleanName(sosName(sensor.sml))} \\
|
||||
Station Description & \Sexpr{cleanName(sosAbstract(sensor.sml))} \\
|
||||
Observed Area & $ \Sexpr{toString(sosBoundedBy(sensor.sml))} $ (lon min, lat min, lon max, lat max)\footnote{Projektionsparameter: \Sexpr{cleanName(CRSargs(sosGetCRS(sensor.sml)))}} \\
|
||||
\hline
|
||||
|
||||
\end{tabular}
|
||||
\caption{Messstation Metadaten}
|
||||
\label{tab:station_metadata}
|
||||
\end{table}
|
||||
% \FloatBarrier
|
||||
|
||||
% TODO plot station location
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
<<label=plotstation, echo=FALSE, results=hide, fig=TRUE>>=
|
||||
|
||||
# CRS
|
||||
sensor.crs <- sosGetCRS(sensor.sml)
|
||||
worldHigh <- pruneMap(map(database = "worldHires", region = "Germany",
|
||||
plot = FALSE))
|
||||
worldHigh_Lines <- map2SpatialLines(worldHigh, proj4string = sensor.crs)
|
||||
plot(worldHigh_Lines, col = "grey50")
|
||||
proj4string(worldHigh_Lines)
|
||||
|
||||
sensor.point <- SpatialPoints(coords = sosCoordinates(sensor.sml),
|
||||
proj4string = sosGetCRS(sensor.sml))
|
||||
plot(sensor.point, cex=1, pch = 3, add = TRUE)
|
||||
|
||||
# data(world.cities)
|
||||
# map.cities(label = TRUE, pch = 19, col = "black")
|
||||
|
||||
map.axes()
|
||||
map.scale()
|
||||
title(main = paste0("Messstation ", sosName(sensor.sml)))
|
||||
|
||||
@
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\newpage
|
||||
\section{Analyse}
|
||||
|
||||
Im Folgenden wird eine einfache Zeitreihenanalyse für die vorliegende Messstation durchgeführt.
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{Analyserahmen}
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
<<label=metadata_analysis, echo=FALSE, results=hide>>=
|
||||
tPeriod.end <- Sys.time()
|
||||
tPeriod <- sosCreateEventTimeList(
|
||||
time = sosCreateTimePeriod(
|
||||
sos = pegelsos,
|
||||
begin = tPeriod.end - (3600 * 24 * tPeriod_days),
|
||||
end = tPeriod.end))
|
||||
cat("[pegel] time period: ", toString(tPeriod[[1]]), "\n")
|
||||
@
|
||||
|
||||
Die Analysen umfassen einen Zeitraum von \Sexpr{tPeriod_days} Tagen vor der Erstellungszeit dieses Dokumentes (\Sexpr{tPeriod.end}).
|
||||
|
||||
|
||||
% do analysis here so that result metadata can be used
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
<<label=request, echo=FALSE, results=hide>>=
|
||||
pegelObs <- getObservation(sos = pegelsos,
|
||||
observedProperty = observed_property,
|
||||
offering = offering,
|
||||
procedure = sensors,
|
||||
eventTime = tPeriod)
|
||||
|
||||
r1 <- sosResult(pegelObs[[1]])
|
||||
range(r1[[toString(observed_property)]])
|
||||
r1clean <- subset(r1, r1[[toString(observed_property)]] > 0)
|
||||
range(r1clean$Wasserstand)
|
||||
@
|
||||
|
||||
\begin{table}[h]
|
||||
\centering
|
||||
\begin{tabular}{l|l}
|
||||
|
||||
\hline
|
||||
Messgröße & \Sexpr{cleanName(names(r1))} \\
|
||||
Messeinheit & \Sexpr{cleanName(sosUOM(r1))} \\
|
||||
Zeitintervall & $ \Sexpr{range(r1clean$SamplingTime)[1]} $ bis $ \Sexpr{range(r1clean$SamplingTime)[2]} $ \\
|
||||
Werteintervall & $ \Sexpr{range(r1clean$Wasserstand)} $ \\
|
||||
Werteanzahl & $ \Sexpr{cleanName(dim(r1clean)[1])} $ \\
|
||||
\hline
|
||||
|
||||
\end{tabular}
|
||||
\caption{Metadaten der Messreihe(n)}
|
||||
\label{tab:data_metadata}
|
||||
\end{table}
|
||||
% \FloatBarrier
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
<<label=plotcode, echo=TRUE, results=hide>>=
|
||||
r1plot <- xyplot(r1clean$Wasserstand ~ r1clean$SamplingTime, r1clean, type = "l",
|
||||
col = "grey", xlab = "Time", ylab = "Wasserstand")
|
||||
|
||||
r1plot <- r1plot + layer(panel.quantile(x, y, tau = c(.95, .5, .05)))
|
||||
@
|
||||
|
||||
Die folgende Analyse wurde durchgeführt: Plot a quantile regression line with standard error bounds, using the quantreg package. This is based on the stat\_quantile function from ggplot2.
|
||||
|
||||
% Sweave cheat sheet: http://users.aims.ac.za/~davidw/David_Wakyiku_sweavecs.pdf
|
||||
\begin{figure}[h!]
|
||||
\centering
|
||||
|
||||
<<label=plot, echo=FALSE, fig=TRUE>>=
|
||||
r1plot
|
||||
@
|
||||
|
||||
% http://en.wikibooks.org/wiki/LaTeX/Floats,_Figures_and_Captions
|
||||
% \includegraphics[width=0.9\textwidth]{pegel-plot.pdf}
|
||||
|
||||
\caption{Werte für \Sexpr{cleanName(sensor)} mit Quantile-basierter Regressionslinie und Fehlerintervall.}
|
||||
\label{fig:QuantileRegression}
|
||||
\end{figure}
|
||||
|
||||
|
||||
% TODO more analysis, e.g. outlier detection, check for missing values, ...
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\newpage
|
||||
\section{Reproduzierbarkeit}
|
||||
|
||||
Der folgende Code und die Laufzeitumgebung wurden zur Generierung dieses Berichtes verwendet.
|
||||
|
||||
% http://stackoverflow.com/questions/4362747/print-the-sourced-r-file-to-an-appendix-using-sweave
|
||||
\subsection{Laufzeitumgebung}
|
||||
|
||||
<<SessionInformation,echo=FALSE,results=tex>>=
|
||||
toLatex(sessionInfo())
|
||||
@
|
||||
|
||||
\subsection{Code}
|
||||
|
||||
Die originale \href{http://de.wikipedia.org/wiki/Sweave}{Sweave}-Datei dieses Dokumentes kann \href{\Sexpr{resource_url_report_file}}{hier} heruntergeladen werden. Sie enthält den gesamten Code der Analyse. Download der Prozessbeschreibung \href{\Sexpr{process_description_url}}{hier}.
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{Kontakt}
|
||||
|
||||
Daniel Nüst, \href{mailto:d.nuest@52north.org}{d.nuest@52north.org}.
|
||||
|
||||
\subsection{Lizenz}
|
||||
|
||||
Dieses Dokument ist unter der Creative Commons Attribution-ShareAlike 3.0 Unported (CC-BY-SA 3.0) Lizenz veröffentlicht.
|
||||
|
||||
\begin{center}
|
||||
\includegraphics[width=60px]{cc-by-sa.png}
|
||||
\end{center}
|
||||
|
||||
|
||||
\end{document}
|
|
@ -0,0 +1,263 @@
|
|||
% SOURCE: http://users.stat.umn.edu/~geyer/Sweave/#exam
|
||||
\documentclass{article}
|
||||
|
||||
\usepackage{amsmath}
|
||||
\usepackage{amscd}
|
||||
\usepackage[tableposition=top]{caption}
|
||||
\usepackage{ifthen}
|
||||
\usepackage[utf8]{inputenc}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\title{An Sweave Demo}
|
||||
\author{Charles J. Geyer}
|
||||
\maketitle
|
||||
|
||||
This is a demo for using the \verb@Sweave@ command in R. To
|
||||
get started make a regular \LaTeX\ file (like this one) but
|
||||
give it the suffix \verb@.Rnw@ instead of \verb@.tex@ and then
|
||||
turn it into a \LaTeX\ file (\verb@foo.tex@) with the (unix) command
|
||||
\begin{verbatim}
|
||||
R CMD Sweave foo.Rnw
|
||||
\end{verbatim}
|
||||
So you can do
|
||||
\begin{verbatim}
|
||||
latex foo
|
||||
xdvi foo
|
||||
\end{verbatim}
|
||||
and so forth.
|
||||
|
||||
So now we have a more complicated file chain
|
||||
$$
|
||||
\begin{CD}
|
||||
\texttt{foo.Rnw}
|
||||
@>\texttt{Sweave}>>
|
||||
\texttt{foo.tex}
|
||||
@>\texttt{latex}>>
|
||||
\texttt{foo.dvi}
|
||||
@>\texttt{xdvi}>>
|
||||
\text{view of document}
|
||||
\end{CD}
|
||||
$$
|
||||
and what have we accomplished other than making it twice as annoying
|
||||
to the WYSIWYG crowd (having to run both \verb@Sweave@ and \verb@latex@
|
||||
to get anything that looks like the document)?
|
||||
|
||||
Well, we can now include R in our document. Here's a simple example
|
||||
<<two>>=
|
||||
2 + 2
|
||||
@
|
||||
What I actually typed in \verb@foo.Rnw@ was
|
||||
\begin{tabbing}
|
||||
\verb@<<two>>=@ \\
|
||||
\verb@2 + 2@ \\
|
||||
\verb+@+ \\
|
||||
\end{tabbing}
|
||||
This is not \LaTeX. It is a ``code chunk'' to be processed by \verb@Sweave@.
|
||||
When \verb@Sweave@ hits such a thing, it processes it, runs R to get the
|
||||
results, and stuffs (by default) the output in the \LaTeX\ file it is
|
||||
creating. The \LaTeX\ between code chunks is copied verbatim (except
|
||||
for \verb@Sexpr@, about which see below). Hence to create an Rnw document
|
||||
you just write plain old \LaTeX\ interspersed with ``code chunks'' which
|
||||
are plain old R.
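For concreteness, a minimal but complete \verb@.Rnw@ file (a
hypothetical sketch, not the actual \verb@foo.Rnw@) could look like
\begin{tabbing}
\verb@\documentclass{article}@ \\
\verb@\begin{document}@ \\
\verb@The sum is computed by R:@ \\
\verb@<<sum>>=@ \\
\verb@2 + 2@ \\
\verb+@+ \\
\verb@\end{document}@
\end{tabbing}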
|
||||
|
||||
\pagebreak[3]
|
||||
Plots get a little more complicated. First we make something to plot
|
||||
(simulate regression data).
|
||||
<<reg>>=
|
||||
n <- 50
|
||||
x <- seq(1, n)
|
||||
a.true <- 3
|
||||
b.true <- 1.5
|
||||
y.true <- a.true + b.true * x
|
||||
s.true <- 17.3
|
||||
y <- y.true + s.true * rnorm(n)
|
||||
out1 <- lm(y ~ x)
|
||||
summary(out1)
|
||||
@
|
||||
(for once we won't show the code chunk itself, look at \verb@foo.Rnw@
|
||||
if you want to see what the actual code chunk was).
|
||||
|
||||
Figure~\ref{fig:one} (p.~\pageref{fig:one})
|
||||
is produced by the following code
|
||||
<<label=fig1plot,include=FALSE>>=
|
||||
plot(x, y)
|
||||
abline(out1)
|
||||
@
|
||||
\begin{figure}
|
||||
\begin{center}
|
||||
<<label=fig1,fig=TRUE,echo=FALSE>>=
|
||||
<<fig1plot>>
|
||||
@
|
||||
\end{center}
|
||||
\caption{Scatter Plot with Regression Line}
|
||||
\label{fig:one}
|
||||
\end{figure}
|
||||
Note that \verb@x@, \verb@y@, and \verb@out1@ are remembered from
|
||||
the preceding code chunk. We don't have to regenerate them.
|
||||
All code chunks are part of one R ``session''.
|
||||
|
||||
Now this was a little tricky. We did this with two code chunks,
|
||||
one visible and one invisible. First we did
|
||||
\begin{tabbing}
|
||||
\verb@<<label=fig1plot,include=FALSE>>=@ \\
|
||||
\verb@plot(x, y)@ \\
|
||||
\verb@abline(out1)@ \\
|
||||
\verb+@+
|
||||
\end{tabbing}
|
||||
where the \verb@include=FALSE@ indicates that the output (text and graphics)
|
||||
should not go here (they will be some place else) and the \verb@label=fig1plot@
|
||||
gives the code chunk a name (to be used later). And ``later'' is almost
|
||||
immediate. Next we did
|
||||
\begin{tabbing}
|
||||
\verb@\begin{figure}@ \\
|
||||
\verb@\begin{center}@ \\
|
||||
\verb@<<label=fig1,fig=TRUE,echo=FALSE>>=@ \\
|
||||
\verb@<<fig1plot>>@ \\
|
||||
\verb+@+ \\
|
||||
\verb@\end{center}@ \\
|
||||
\verb@\caption{Scatter Plot with Regression Line}@ \\
|
||||
\verb@\label{fig:one}@ \\
|
||||
\verb@\end{figure}@
|
||||
\end{tabbing}
|
||||
In this code chunk the \verb@fig=TRUE@ indicates that the chunk
|
||||
generates a figure. \verb@Sweave@ automagically makes both EPS and PDF
|
||||
files for the figure and automagically generates an
|
||||
appropriate \LaTeX\ \verb@\includegraphics@ command
|
||||
to include the plot in the \verb@figure@ environment.
|
||||
The \verb@echo=FALSE@ in the code chunk means just what it says
|
||||
(we've already seen the code---it was produced by the preceding chunk---and
|
||||
we don't want to see it again, especially not in our figure).
|
||||
The \verb@<<fig1plot>>@ is an example of ``code chunk reuse''.
|
||||
It means that we reuse the code of the code chunk named \verb@fig1plot@.
|
||||
It is important that we observe the DRY/SPOT rule (\emph{don't repeat yourself}
|
||||
or \emph{single point of truth}) and only have one bit of code for generating
|
||||
the plot. What the reader sees is guaranteed to be the code that made the
|
||||
plot. If we had used cut-and-paste, just repeating the code, the duplicated
|
||||
code might get out of sync after edits.
|
||||
The rest of this should be recognizable to anyone who has ever
|
||||
done a \LaTeX\ figure.
|
||||
|
||||
So making a figure is a bit more complicated in some ways but much simpler
|
||||
in others. Note the following virtues
|
||||
\begin{itemize}
|
||||
\item The figure is guaranteed to be the one described by the text
|
||||
(at least by the R in the text).
|
||||
\item No messing around with sizing or rotations. It just works!
|
||||
\end{itemize}
|
||||
|
||||
\begin{figure}
|
||||
\begin{center}
|
||||
<<label=fig2,fig=TRUE,echo=FALSE>>=
|
||||
out3 <- lm(y ~ x + I(x^2) + I(x^3))
|
||||
plot(x, y)
|
||||
curve(predict(out3, newdata=data.frame(x=x)), add = TRUE)
|
||||
@
|
||||
\end{center}
|
||||
\caption{Scatter Plot with Cubic Regression Curve}
|
||||
\label{fig:two}
|
||||
\end{figure}
|
||||
Note that if you don't care to show the R code to make the figure,
|
||||
it is simpler still. Figure~\ref{fig:two} (p.~\pageref{fig:two})
|
||||
shows another plot.
|
||||
What I actually typed in \verb@foo.Rnw@ was
|
||||
\begin{tabbing}
|
||||
\verb@\begin{figure}@ \\
|
||||
\verb@\begin{center}@ \\
|
||||
\verb@<<label=fig2,fig=TRUE,echo=FALSE>>=@ \\
|
||||
\verb@out3 <- lm(y ~ x + I(x^2) + I(x^3))@ \\
|
||||
\verb@plot(x, y)@ \\
|
||||
\verb@curve(predict(out3, newdata=data.frame(x=x)), add = TRUE)@ \\
|
||||
\verb+@+ \\
|
||||
\verb@\end{center}@ \\
|
||||
\verb@\caption{Scatter Plot with Cubic Regression Curve}@ \\
|
||||
\verb@\label{fig:two}@ \\
|
||||
\verb@\end{figure}@
|
||||
\end{tabbing}
|
||||
Now we just included the code for the plot in the figure
|
||||
(with \verb@echo=FALSE@ so it doesn't show).
|
||||
|
||||
Also note that every time we rerun \verb@Sweave@ Figures~\ref{fig:one}
|
||||
and~\ref{fig:two} change, the latter conspicuously (because the simulated
|
||||
data are random). Everything
|
||||
just works. This should tell you the main virtue of Sweave.
|
||||
It's always correct. There is never a problem with stale
|
||||
cut-and-paste.
|
||||
|
||||
<<foo,echo=FALSE,results=hide>>=
|
||||
options(scipen=10)
|
||||
@
|
||||
Simple numbers can be plugged into the text with the \verb@\Sexpr@
|
||||
command, for example, the quadratic and cubic regression coefficients
|
||||
in the preceding regression were
|
||||
$\beta_2 = \Sexpr{round(out3$coef[3], 4)}$
|
||||
and
|
||||
$\beta_3 = \Sexpr{round(out3$coef[4], 4)}$.
|
||||
Just magic!
|
||||
What I actually typed in \verb@foo.Rnw@ was
|
||||
\begin{tabbing}
|
||||
\verb@in the preceding regression@ \\
|
||||
\verb@were $\beta_2 = \Se@\verb@xpr{round(out3$coef[3], 4)}$@ \\
|
||||
\verb@and $\beta_3 = \Se@\verb@xpr{round(out3$coef[4], 4)}$.@
|
||||
\end{tabbing}
|
||||
<<foo2,echo=FALSE,results=hide>>=
|
||||
options(scipen=0)
|
||||
@
|
||||
|
||||
The \verb@xtable@ command is used to make tables. (The following
|
||||
is the \verb@Sweave@ of another code chunk that we don't explicitly
|
||||
show. Look at \verb@foo.Rnw@ for details.)
|
||||
<<blurfle>>=
|
||||
out2 <- lm(y ~ x + I(x^2))
|
||||
foo <- anova(out1, out2, out3)
|
||||
foo
|
||||
class(foo)
|
||||
dim(foo)
|
||||
foo <- as.matrix(foo)
|
||||
foo
|
||||
@
|
||||
So now we are ready to turn the matrix \verb@foo@
|
||||
into Table~\ref{tab:one}
|
||||
<<label=tab1,echo=FALSE,results=tex>>=
|
||||
library(xtable)
|
||||
print(xtable(foo, caption = "ANOVA Table", label = "tab:one",
|
||||
digits = c(0, 0, 2, 0, 2, 3, 3)), table.placement = "tbp",
|
||||
caption.placement = "top")
|
||||
@
|
||||
using the R chunk
|
||||
\begin{tabbing}
|
||||
\verb@<<label=tab1,echo=FALSE,results=tex>>=@ \\
|
||||
\verb@library(xtable)@ \\
|
||||
\verb@print(xtable(foo, caption = "ANOVA Table", label = "tab:one",@ \\
|
||||
\verb@ digits = c(0, 0, 2, 0, 2, 3, 3)), table.placement = "tbp",@ \\
|
||||
\verb@ caption.placement = "top")@ \\
|
||||
\verb+@+
|
||||
\end{tabbing}
|
||||
(note the difference between arguments to the \verb@xtable@ function
|
||||
and to the \verb@xtable@ method of the \verb@print@ function).
|
||||
|
||||
To summarize, \verb@Sweave@ is terrific, so important that soon
|
||||
we'll not be able to get along without it. Its virtues are
|
||||
\begin{itemize}
|
||||
\item The numbers and graphics you report are actually what they
|
||||
are claimed to be.
|
||||
\item Your analysis is reproducible. Even years later, when you've
|
||||
completely forgotten what you did, the whole write-up, every single
|
||||
number or pixel in a plot is reproducible.
|
||||
\item Your analysis actually works---at least in this particular instance.
|
||||
The code you show actually executes without error.
|
||||
\item Toward the end of your work, with the write-up almost done you
|
||||
discover an error. Months of rework to do? No! Just fix the error
|
||||
and rerun \verb@Sweave@ and \verb@latex@. One single problem like
|
||||
this and you will have all the time invested in \verb@Sweave@ repaid.
|
||||
\item This methodology provides discipline.
|
||||
There's nothing that will make you clean up your code like
|
||||
the prospect of actually revealing it to the world.
|
||||
\end{itemize}
|
||||
|
||||
Whether we're talking about homework, a consulting report, a textbook,
|
||||
or a research paper: if it involves computing and statistics,
|
||||
this is the way to do it.
|
||||
|
||||
\end{document}
|
||||
|
|
@ -0,0 +1 @@
|
|||
42.0
|
|
@ -0,0 +1 @@
|
|||
17
|
|
@ -0,0 +1 @@
|
|||
This is a dummy txt-file
|
Binary file not shown.
After Width: | Height: | Size: 2.4 KiB |
|
@ -0,0 +1,10 @@
|
|||
# WPS4R Script Repository
|
||||
|
||||
This directory stores the R scripts that are made available through WPS4R.
|
||||
|
||||
The files provided in this directory fall into the following categories:
|
||||
|
||||
* **Test scripts**: Files starting with ``test_``. These scripts are used to check that WPS4R works properly and are also used in integration tests - edit with care!
|
||||
* **Demo scripts**: Files starting with ``demo_``. Demo scripts should work out of the box but may use any CRAN package. They demonstrate the variety of features that WPS4R supports.
|
||||
* **Spatial demo scripts**: Files starting with ``geo_``. Demo scripts that specifically showcase spatial data as input and output values.
|
||||
* **Other scripts**: No specific naming scheme. These scripts are related to specific projects or provide utility functions.
|
|
@ -0,0 +1,79 @@
|
|||
# input/output variables are named exactly as in the DescribeProcess document
|
||||
# inputs should be initialized before running this script
|
||||
|
||||
library("sp")
|
||||
library("gstat")
|
||||
library("rgdal")
|
||||
library("intamap")
|
||||
|
||||
###############################################################################
|
||||
# create a test input dataset based on the meuse dataset
|
||||
#wps.off;
|
||||
data("meuse")
|
||||
coordinates(meuse) <- ~ x+y
|
||||
proj4string(meuse) <- CRS("+init=epsg:28992")
|
||||
|
||||
data("meuse.grid")
|
||||
coordinates(meuse.grid) <- ~x+y
|
||||
proj4string(meuse.grid) <- CRS("+init=epsg:28992")
|
||||
gridded(meuse.grid) <- TRUE
|
||||
|
||||
setwd("d:/"); getwd()
|
||||
# http://spatial-analyst.net/book/system/files/GstatIntro.pdf
|
||||
writeOGR(meuse, ".", "meuse", "ESRI Shapefile")
|
||||
#wps.on;
|
||||
|
||||
###############################################################################
|
||||
# log function
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[demo.idw] ", Sys.time(), " > ", ..., "\n"))
|
||||
}
|
||||
|
||||
myLog("Start script... ")
|
||||
|
||||
###############################################################################
|
||||
#wps.des: id = demo.idw, title = Inverse Distance Interpolation in R,
|
||||
# abstract = Calculates Inverse Distance Interpolation for
|
||||
# given point values on a specified grid;
|
||||
|
||||
#wps.in: points, type = application/x-zipped-shp, title = measurement points,
|
||||
# abstract = Points for IDW, minOccurs = 0, maxOccurs=1;
|
||||
|
||||
#wps.in: maxdist, type = double, value = Inf, title = maximum distance,
|
||||
# abstract = Only observations within a distance of maxdist
|
||||
# from the prediction location are used for prediction;
|
||||
|
||||
#wps.in: nmax, type = integer, value = Inf, title = number of observations,
|
||||
# abstract = Maximum number of nearest observations that should be used for prediction;
|
||||
|
||||
#wps.in: attributename, string;
|
||||
|
||||
#wps.off;
|
||||
attributename <- "zinc"
|
||||
ogrInfo("meuse.shp", layer = "meuse")
|
||||
#points <- readOGR("meuse.shp", layer = "meuse")
|
||||
points <- "meuse.shp"
|
||||
nmax <- 23
|
||||
maxdist <- Inf
|
||||
#wps.on;
|
||||
|
||||
layername <- sub("\\.shp$", "", points) # just use the file name as the layer name
|
||||
inputPoints <- readOGR(points, layer = layername)
|
||||
summary(inputPoints)
|
||||
|
||||
f <- formula(paste(attributename, "~ 1"))
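# e.g. attributename = "zinc" yields the formula "zinc ~ 1", i.e. inverse distance weighting without trend predictors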
|
||||
myLog("Using this formula: ", toString(f))
|
||||
|
||||
gridpoints = SpatialPoints(makegrid(inputPoints),
|
||||
proj4string = CRS(proj4string(inputPoints)))
|
||||
grid = SpatialPixels(gridpoints)
|
||||
myLog("Interpolation output grid:")
|
||||
summary(grid)
|
||||
|
||||
idw <- idw(formula = f, locations = inputPoints, newdata = grid,
|
||||
maxdist = maxdist, nmax = nmax)
|
||||
summary(idw)
|
||||
|
||||
idwImage <- "output.tiff"
writeGDAL(idw, fn = idwImage)
|
||||
#wps.out: idwImage, type = geotiff, title = the interpolated raster,
|
||||
# abstract = interpolation output as rasterfile in GeoTIFF format;
|
|
@ -0,0 +1,22 @@
|
|||
# wps.des: demo.image, title = demo image process generating a plot of the Meuse dataset;
|
||||
|
||||
library("sp")
|
||||
data(meuse)
|
||||
coordinates(meuse) <- ~x+y
|
||||
|
||||
# wps.in: parameter, string, data variable,
|
||||
# abstract = the data variable to plot: one of {copper / lead / zinc / elev},
|
||||
# value = zinc;
|
||||
|
||||
#wps.off;
|
||||
parameter <- "zinc"
|
||||
setwd(tempdir())
|
||||
#wps.on;
|
||||
|
||||
image <- "output.png"
|
||||
png(file = image)
|
||||
print(spplot(meuse, parameter, main = paste0("Meuse dataset, variable: ", parameter), sub = toString(Sys.time())))
|
||||
graphics.off()
|
||||
cat("Saved image ", image, " in ", getwd())
|
||||
|
||||
# wps.out: image, png;
|
|
@ -0,0 +1,418 @@
|
|||
library("jsonlite")
|
||||
library("sp")
|
||||
library("intamap")
|
||||
library("lattice")
|
||||
|
||||
################################################################################
|
||||
# test input dataset
|
||||
#wps.off;
|
||||
data <- "test_data.json"
|
||||
testData <- '{
|
||||
|
||||
"values": [
|
||||
{
|
||||
"coords": [
|
||||
7.2044419705086735,
|
||||
51.266086785330224
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1371064950000,
|
||||
"value": 20.3
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.365665095102475,
|
||||
51.14334954184367
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1371064050000,
|
||||
"value": 19.6
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.100552165082708,
|
||||
51.087732979584395
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1370921850000,
|
||||
"value": 10
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.320281676860971,
|
||||
51.22548556899834
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372980150000,
|
||||
"value": 16.1
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.40096795458942,
|
||||
51.17103846368366
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372720950000,
|
||||
"value": 13.1
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.0602992201626185,
|
||||
51.0966135017852
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372979250000,
|
||||
"value": 16.7
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.107294690952992,
|
||||
51.225894610660866
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1371065850000,
|
||||
"value": 21.4
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.299968716891246,
|
||||
51.200402890202945
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372994550000,
|
||||
"value": 15.8
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.186580962308821,
|
||||
51.06958203108305
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372909500000,
|
||||
"value": 13.9
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.530828579490191,
|
||||
51.09836102312226
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1379323350000,
|
||||
"value": 9.9
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.179744487634198,
|
||||
51.06765508608393
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372923450000,
|
||||
"value": 16.1
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.399410121794452,
|
||||
51.17167137941307
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1379324250000,
|
||||
"value": 11.5
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.283090752705482,
|
||||
51.090121367969026
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372995450000,
|
||||
"value": 15.3
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.184148868629406,
|
||||
51.218726535948775
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1379326950000,
|
||||
"value": 10
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.226317896922862,
|
||||
51.19657532705112
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372908600000,
|
||||
"value": 14.1
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.430406594086035,
|
||||
51.13625558025584
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1372981950000,
|
||||
"value": 15.5
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.305941659811078,
|
||||
51.063698821453556
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1379327400000,
|
||||
"value": 11.4
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.239976260754382,
|
||||
51.075121231895494
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1379247750000,
|
||||
"value": 14.3
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.557398313117449,
|
||||
51.08136930185558
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1379320650000,
|
||||
"value": 9.7
|
||||
}
|
||||
},
|
||||
{
|
||||
"coords": [
|
||||
7.557398313111111,
|
||||
51.08136930188888
|
||||
],
|
||||
"lastValue": {
|
||||
"timestamp": 1379320650000,
|
||||
"value": 9.42
|
||||
}
|
||||
}
|
||||
],
|
||||
"phenomenon": "3",
|
||||
"bounds": {
|
||||
"_southWest": {
|
||||
"lat": 50.77033932897995,
|
||||
"lng": 6.87744140625
|
||||
},
|
||||
"_northEast": {
|
||||
"lat": 51.55572834577049,
|
||||
"lng": 7.738494873046875
|
||||
}
|
||||
},
|
||||
"pixelBounds": {
|
||||
"min": {
|
||||
"x": 136080,
|
||||
"y": 87113
|
||||
},
|
||||
"max": {
|
||||
"x": 136707,
|
||||
"y": 88025
|
||||
}
|
||||
}
|
||||
}'
|
||||
write(x = testData, file = data)
|
||||
#wps.on;
|
||||
|
||||
################################################################################
|
||||
# log function
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[demo.jsclient] ", Sys.time(), " | ", ..., "\n"))
|
||||
}
|
||||
myLog("Start script... ")
|
||||
|
||||
################################################################################
|
||||
#wps.des: id = demo.interpolation.jsclient, title = Interpolation,
|
||||
# abstract = Interpolation of environmental observations from a Javascript client;
|
||||
|
||||
################################################################################
|
||||
# input
|
||||
|
||||
#wps.in: data, type = application/json, title = measurement points and metadata,
|
||||
# abstract = Locations and values for interpolation as well as the name of the
|
||||
# observed property and the bounding box, minOccurs = 0, maxOccurs=1;
|
||||
|
||||
#wps.in: type, type = string, title = plot type,
|
||||
# abstract = set whether 'mean' or 'variance' of the interpolation is plotted,
|
||||
# value = mean, minOccurs = 0, maxOccurs=1;
|
||||
#wps.off;
|
||||
type <- "mean"
|
||||
#wps.on;
|
||||
|
||||
plotSwitch <- 1
|
||||
if(type == "variance")
|
||||
plotSwitch <- 2
|
||||
myLog("Plotting ", type, " - so switch is set to ", plotSwitch)
|
||||
|
||||
#wps.in: cellNumber, type = integer, title = number of prediction cells,
|
||||
# abstract = the number of grid cells used for the output grid,
|
||||
# value = 20000, minOccurs = 0, maxOccurs=1;
|
||||
#wps.off;
|
||||
cellNumber <- 12000
|
||||
#wps.on;
|
||||
|
||||
##wps.in: observedProperty, type = string, title = observed property name,
|
||||
## abstract = the name of the observed property;
|
||||
|
||||
##wps.in: bounds, type = json, title = bounding box for the interpolation,
|
||||
## abstract = the corner coordinates of the bounding box to be used for
|
||||
## interpolation's prediction locations;
|
||||
|
||||
|
||||
# read the json and store the data in R data structures
|
||||
inputData <- fromJSON(data)
|
||||
myLog("Input data: \n ", toString(inputData))
|
||||
|
||||
phenomenon <- inputData$phenomenon
|
||||
myLog("Phenomenon: ", phenomenon)
|
||||
|
||||
# save the bounds
|
||||
southWest <- list("lat" = inputData$bounds[["_southWest"]]$lat,
|
||||
"lon" = inputData$bounds$lng[["_southWest"]])
|
||||
northEast <- list("lat" = inputData$bounds[["_northEast"]]$lat,
|
||||
"lon" = inputData$bounds$lng[["_northEast"]])
|
||||
|
||||
# image size
|
||||
width <- as.numeric(inputData$pixelBounds[["max"]][["x"]]) - as.numeric(inputData$pixelBounds[["min"]][["x"]])
|
||||
height <- as.numeric(inputData$pixelBounds[["max"]][["y"]]) - as.numeric(inputData$pixelBounds[["min"]][["y"]])
|
||||
myLog("width: ", width, ", height = ", height)
|
||||
|
||||
# values
|
||||
str(inputData$values)
|
||||
names(inputData$values)
|
||||
|
||||
lon <- sapply(inputData$values[["coords"]], "[[", 1) # coords are given as [lon, lat]
|
||||
lat <- sapply(inputData$values[["coords"]], "[[", 2)
|
||||
time <- as.POSIXct(inputData$values$lastValue$timestamp/1000, origin="1970-01-01")
|
||||
value <- inputData$values$lastValue$value
|
||||
|
||||
pointDataFrame <- data.frame(lat, lon, time, value)
|
||||
inCRS <- CRS("+proj=longlat +datum=WGS84") # the coordinates are geographic lon/lat degrees, not UTM
|
||||
|
||||
pointData <- SpatialPointsDataFrame(
|
||||
coords = pointDataFrame[,c("lon", "lat")],
|
||||
data = pointDataFrame[,c("time", "value")],
|
||||
proj4string = inCRS)
|
||||
myLog("Got spatial data points with bbox ", toString(bbox(pointData)))
|
||||
summary(pointData)
|
||||
|
||||
#wps.off; TESTPLOT
|
||||
library("mapdata"); library(maptools)
|
||||
germany_p <- pruneMap(map(database = "worldHires", region = "Germany",
|
||||
plot = FALSE))
|
||||
germany_sp <- map2SpatialLines(germany_p, proj4string = inCRS)
|
||||
proj4string(germany_sp) <- inCRS
|
||||
plot(x = germany_sp, col = "grey")
|
||||
plot(pointData, pch = 20, col = "blue", add = TRUE)
|
||||
title("Testplot")
|
||||
#wps.on;
|
||||
|
||||
################################################################################
|
||||
# interpolation
|
||||
|
||||
# create sampling grid based on the provided bounds
|
||||
x <- c(southWest$lon, northEast$lon)
|
||||
y <- c(southWest$lat, northEast$lat)
|
||||
xy <- cbind(x,y)
|
||||
grdBounds <- SpatialPoints(xy)
|
||||
myLog("Creating grid for interpolation within bounds ", toString(bbox(grdBounds)))
|
||||
|
||||
grdpoints = SpatialPoints(makegrid(x = grdBounds, n = cellNumber),
|
||||
proj4string = inCRS)
|
||||
grd = SpatialPixels(grdpoints)
|
||||
myLog("Interpolation output grid:")
|
||||
summary(grd)
|
||||
|
||||
#?interpolate
|
||||
interpolatedData <- interpolate(observations = pointData, predictionLocations = grd)
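# intamap::interpolate chooses a suitable interpolation method automatically and reports the choice in $processDescription, which is reused for the output below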
|
||||
myLog("Finished with interpolation: ", interpolatedData$processDescription)
|
||||
|
||||
#wps.off; INSPECT INTERPOLATION
|
||||
plotIntamap(interpolatedData)
|
||||
plot(interpolatedData$observations)
|
||||
plot(interpolatedData$predictions)
|
||||
plot(interpolatedData$predictions$var1.pred)
|
||||
|
||||
interpolationOut <- interpolatedData$predictions$var1.pred
|
||||
str(interpolationOut)
|
||||
#wps.on;
|
||||
|
||||
# project to UTM for interpolation
|
||||
# if(proj4string(inputPoints) != proj4string(raster)) {
|
||||
# myLog("projection of points and raster differ!\n",
|
||||
# proj4string(points), "\n", proj4string(raster))
|
||||
# inputPoints <- spTransform(points, CRS(proj4string(raster)))
|
||||
# }
|
||||
|
||||
################################################################################
|
||||
# output
|
||||
|
||||
method <- interpolatedData$processDescription
|
||||
# wps.out: method, type = string, title = process description,
|
||||
# abstract = a textual description of the used interpolation method;
|
||||
|
||||
image <- "interpolated.png"
|
||||
png(filename = image, width = width, height = height, units = "px")
|
||||
|
||||
trellis.par.set(axis.line = list(col=NA))
|
||||
# plot without any borders
|
||||
cut.val <- 0 # was -5 ### Just to force it.
|
||||
theme.novpadding <-
|
||||
list(layout.heights =
|
||||
list(top.padding = cut.val,
|
||||
main.key.padding = cut.val,
|
||||
key.axis.padding = cut.val,
|
||||
axis.xlab.padding = cut.val,
|
||||
xlab.key.padding = cut.val,
|
||||
key.sub.padding = cut.val,
|
||||
bottom.padding = cut.val),
|
||||
layout.widths =
|
||||
list(left.padding = cut.val,
|
||||
key.ylab.padding = cut.val,
|
||||
ylab.axis.padding = cut.val,
|
||||
axis.key.padding = cut.val,
|
||||
right.padding = cut.val))
|
||||
|
||||
print(spplot(interpolatedData$predictions[plotSwitch], col.regions = bpy.colors(),
|
||||
colorkey = FALSE, border = NA, ann = FALSE, axes = FALSE,
|
||||
par.settings = theme.novpadding))
|
||||
|
||||
graphics.off()
|
||||
myLog("Saved image ", image, " in ", getwd())
|
||||
# wps.out: image, type = png, title = the interpolated data,
|
||||
# abstract = interpolation output in png format;
|
||||
|
||||
imageBounds <- "imageBounds.json"
|
||||
# try to look like http://leafletjs.com/reference.html#imageoverlay
|
||||
jsonData <- list(c(southWest$lat, southWest$lon), c(northEast$lat, northEast$lon))
|
||||
myLog("Image bounds: ", toString(jsonData), " | bbox of interpolation data: ",
|
||||
toString(bbox(interpolatedData$predictions)))
|
||||
#json <- serializeJSON(bbox(points), pretty = TRUE, digits = 8)
|
||||
json <- toJSON(jsonData, pretty = TRUE, digits = 8)
|
||||
|
||||
write(x = json, file = imageBounds)
|
||||
myLog("Saved bounds in file ", imageBounds, " to ", getwd())
|
||||
# wps.out: imageBounds, type = json, title = image bounds,
|
||||
# abstract = the bounds of the image encoded as json;
|
|
@ -0,0 +1,35 @@
|
|||
# wps.des: demo.meuse.rdata, title = Script that returns meuse data as rdata-files,
|
||||
# abstract=The meuse river data set is contained in the sp-package of R - see package information;
|
||||
|
||||
# wps.in: filename, string, the base name of the generated files, value = meuse;
|
||||
|
||||
# wps.off;
|
||||
filename <- "meuse"
|
||||
setwd(tempdir())
|
||||
cat("wd: ", getwd(), "\n")
|
||||
# wps.on;
|
||||
|
||||
library(sp)
|
||||
data(meuse)
|
||||
data(meuse.grid)
|
||||
data(meuse.riv)
|
||||
|
||||
summary(meuse)
|
||||
summary(meuse.grid)
|
||||
summary(meuse.riv)
|
||||
|
||||
#wps.out: meuse.grid.rdata, rdata+Spatial, Spatial grid data from meuse;
|
||||
meuse.grid.rdata <- paste0(filename, ".grid.RData")
|
||||
save(meuse.grid, file=meuse.grid.rdata)
|
||||
|
||||
#wps.out: meuse.riv.rdata, rdata+SpatialPolygons;
|
||||
meuse.riv.rdata <- paste0(filename, ".riv.RData")
|
||||
save(meuse.riv, file=meuse.riv.rdata)
|
||||
|
||||
#wps.out: meuse.rdata, rdata+SpatialPoints, The meuse data samples;
|
||||
meuse.rdata <- paste0(filename, ".RData")
|
||||
save(meuse, file=meuse.rdata)
|
||||
|
||||
#wps.out: workspace, rdata;
|
||||
workspace="workspace.RData"
|
||||
save.image(workspace)
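# the saved files can be loaded back into a fresh R session with load(), e.g. load("meuse.grid.RData")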
|
|
@ -0,0 +1,99 @@
|
|||
# Copyright (C) 2012 by 52°North Initiative for Geospatial Open Source Software GmbH, Contact: info@52north.org
|
||||
# This document is licensed under Creative Commons Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0), see http://creativecommons.org/licenses/by-sa/3.0/ for details.
|
||||
# Author: Daniel Nuest (d.nuest@52north.org)
|
||||
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[pegel-report] ", Sys.time(), " > ", ..., "\n"))
|
||||
}
|
||||
|
||||
myLog("Start script... ")
|
||||
|
||||
################################################################################
|
||||
# About
|
||||
#
|
||||
# This R script creates a Sweave report about water gauge stations in Germany
|
||||
# in a WPS.
|
||||
|
||||
################################################################################
|
||||
# define metadata, resources, inputs, and outputs
|
||||
|
||||
#wps.des: id = demo.pegelReport, title = Gauge Report,
|
||||
# abstract = create a pdf report for a water gauge analysis;
|
||||
#wps.resource: pegel-report.Rnw, Sweave.sty;
|
||||
|
||||
#wps.in: id = station_name, type = string, title = Station Name,
|
||||
# abstract = Discover gauge station names here: http://pegelonline.wsv.de/gast/karte/standard_mini,
|
||||
# minOccurs = 1, maxOccurs = 1;
|
||||
|
||||
#wps.in: id = days, type = integer, title = Report duration,
|
||||
# abstract = The number of days the report goes back in time,
|
||||
# value = 1,
|
||||
# minOccurs = 0, maxOccurs = 1;
|
||||
|
||||
#wps.out: id = report, type = pdf, title = pegel-analysis report;
|
||||
#wps.out: id = report_source, type = text, title = report source file,
|
||||
# abstract = The source file to generate the report for reproducibility;
|
||||
|
||||
################################################################################
|
||||
# constants and settings
|
||||
|
||||
report_file <- "pegel-report.Rnw"
|
||||
|
||||
process_description_url <- "N/A"
|
||||
resource_url_report_file <- "N/A"
|
||||
|
||||
#print(get("lasttry"))
|
||||
#print(bar)
|
||||
#cat(get("wpsServer"), "\n")
|
||||
#print(get("wpsProcessDescription"))
|
||||
|
||||
if(exists("wpsServer") && wpsServer) {
|
||||
myLog("Running in a WPS...")
|
||||
# get metadata when running in the server
|
||||
# cat(wpsResourceURL, "\n")
|
||||
# cat(wpsProcessDescription, "\n")
|
||||
process_description_url <- wpsProcessDescription
|
||||
resource_url_report_file <- paste0(wpsResourceURL, "/", report_file)
|
||||
|
||||
myLog("wps.description: ", wpsProcessDescription,
|
||||
" | wps.resource: ", wpsResourceURL)
|
||||
}
|
||||
else {
|
||||
myLog("NOT RUNNING ON SERVER!")
|
||||
}
|
||||
|
||||
|
||||
myLog("process description: ", process_description_url,
|
||||
" | report: ", report_file,
|
||||
" | public URL: ", resource_url_report_file);
|
||||
|
||||
################################################################################
|
||||
# save input variables for Rnw file
|
||||
|
||||
tPeriod_days <- days
|
||||
procedure_filter <- station_name
|
||||
myLog("tiem filter: ", tPeriod_days, " | procedures: ", procedure_filter)
|
||||
|
||||
################################################################################
|
||||
# generate report
|
||||
|
||||
# wps.off; for local testing
|
||||
files <- paste0(dirname(getwd()), "/resources/", c(report_file, "Sweave.sty"))
|
||||
setwd(tempdir())
|
||||
lapply(FUN = file.copy, X = files, to = getwd())
|
||||
myLog(" LOCAL TESTING in wd ", getwd())
|
||||
# wps.on;
|
||||
|
||||
myLog("Creating report with file ", report_file, " in ", getwd())
|
||||
Sweave(report_file)
|
||||
system("pdfLatex \"pegel-report.tex\"") #problem: doesn't run without interaction
|
||||
|
||||
report <- "pegel-report.pdf"
|
||||
report_source <- resource_url_report_file
|
||||
|
||||
myLog("report file: ", report,
|
||||
" | report source: ", report_source,
|
||||
" | public URL: ", resource_url_report_file);
|
||||
|
||||
|
||||
myLog("Done!")
|
|
@ -0,0 +1,32 @@
|
|||
# Copyright (C) 2012 by 52°North Initiative for Geospatial Open Source Software GmbH, Contact: info@52north.org
|
||||
# This document is licensed under Creative Commons Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0), see http://creativecommons.org/licenses/by-sa/3.0/ for details.
|
||||
# Author: Daniel Nuest (d.nuest@52north.org)
|
||||
|
||||
# Based on Sweave file from http://users.stat.umn.edu/~geyer/Sweave/#exam
|
||||
|
||||
#wps.des: demo.sweaveFoo, Creates a pdf report based on a simple Sweave file;
|
||||
#wps.in: dummy, integer, value = 0;
|
||||
#wps.out: report, pdf, Sweave output file;
|
||||
#wps.resource: sweave-foo.Rnw, Sweave.sty;
|
||||
|
||||
rnw_file <- "sweave-foo.Rnw"
|
||||
|
||||
# generate report
|
||||
Sweave(rnw_file)
|
||||
|
||||
library(tools)
|
||||
texi2dvi("sweave-foo.tex", pdf = TRUE)
|
||||
report <- "sweave-foo.pdf"
|
||||
|
||||
#wps.out: report_source, text, Sweave source file content;
|
||||
report_source <- rnw_file
|
||||
#wps.out: report_source_copy, text, just another copy of the Sweave file;
|
||||
report_source_copy <- rnw_file
|
||||
|
||||
#wps.out: report_source_link, string, reference link to Sweave source file;
|
||||
report_source_link <- "NA"
|
||||
if(exists("wpsResourceURL"))
|
||||
report_source_link <- paste0(wpsResourceURL, rnw_file)
|
||||
|
||||
# directly run the process with
|
||||
# http://localhost:8080/wps/WebProcessingService?Request=Execute&Service=WPS&version=1.0.0&identifier=org.n52.wps.server.r.test.sweaveFoo&DataInputs=dummy%3D42
|
|
@ -0,0 +1,150 @@
|
|||
# Copyright (C) 2011 by 52 North Initiative for Geospatial Open Source Software GmbH, Contact: info@52north.org
|
||||
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. This program is distributed WITHOUT ANY WARRANTY; without even the implied WARRANTY OF MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program (see gpl-2.0.txt). If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA or visit the Free Software Foundation web page, http://www.fsf.org.
|
||||
# Author: Daniel Nuest (daniel.nuest@uni-muenster.de)
|
||||
# Project: sos4R - visit the project web page, http://www.nordholmen.net/sos4r
|
||||
library("sos4R")
|
||||
library("xts")
|
||||
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[timeseriesPlot] ", ..., "\n"))
|
||||
}
|
||||
|
||||
myLog("Start script... ", Sys.time())
|
||||
|
||||
# wps.off;
|
||||
|
||||
# wps.des: id = demo.timeseriesPlot, title = Plot SOS Time Series,
|
||||
# abstract = Accesses a SOS with sos4R and creates a plot with a fitted
|
||||
# regression line;
|
||||
|
||||
# wps.in: sos_url, string, title = SOS service URL,
|
||||
# abstract = SOS URL endpoint,
|
||||
# minOccurs = 1, maxOccurs = 1;
|
||||
sos_url <- "http://sensorweb.demo.52north.org/PegelOnlineSOSv2.1/sos"
|
||||
|
||||
# wps.in: offering_id, type = string, title = identifier for the used offering,
|
||||
# minOccurs = 1, maxOccurs = 1;
|
||||
offering_id <- "WASSERSTAND_ROHDATEN"
|
||||
|
||||
# wps.in: offering_stationname, type = string,
|
||||
# title = string contained in identifier for the used offering,
|
||||
# minOccurs = 1, maxOccurs = 1;
|
||||
offering_stationname <- "Bake"
|
||||
|
||||
# wps.in: offering_hours, integer, temporal extent,
|
||||
# the number of hours the plot spans to the past,
|
||||
# value = 24, minOccurs = 0, maxOccurs = 1;
|
||||
offering_hours <- 24
|
||||
|
||||
# wps.in: image_width, type = integer,
|
||||
# title = width of the generated image in pixels,
|
||||
# value = 800, minOccurs = 0, maxOccurs = 1;
|
||||
# wps.in: image_height, type = integer,
|
||||
# title = height of the generated image in pixels,
|
||||
# value = 500, minOccurs = 0, maxOccurs = 1;
|
||||
image_width = 800;
|
||||
image_height = 500;
|
||||
|
||||
# wps.in: loess_span, type = double, title = local regression span parameter,
|
||||
# value = 0.75,
|
||||
# minOccurs = 0, maxOccurs = 1;
|
||||
loess_span <- 1
|
||||
|
||||
# wps.on;
|
||||
|
||||
################################################################################
|
||||
# SOS and time series analysis
|
||||
|
||||
converters <- SosDataFieldConvertingFunctions(
|
||||
"WASSERSTAND_ROHDATEN" = sosConvertDouble,
|
||||
"LUFTTEMPERATUR" = sosConvertDouble)
|
||||
|
||||
myLog("Creating SOS connection to ", sos_url)
|
||||
# establish a connection to a SOS instance with default settings
|
||||
sos <- SOS(url = sos_url, dataFieldConverters = converters)
|
||||
|
||||
# wps.off;
|
||||
names(sosOfferings(sos))
|
||||
# wps.on;
|
||||
|
||||
# set up request parameters
|
||||
offering <- sosOfferings(sos)[[offering_id]]
|
||||
myLog("Requesting for offering:\n", toString(offering))
|
||||
|
||||
offering_station_idxs <- grep(pattern = offering_stationname,
|
||||
sosProcedures(offering))
|
||||
# select one station at random
|
||||
stationFilter <- sosProcedures(offering)[
|
||||
offering_station_idxs[sample(1:length(offering_station_idxs), 1)]]
|
||||
myLog("Requesting data for station ", stationFilter)
|
||||
|
||||
observedPropertyFilter <- sosObservedProperties(offering)[1]
|
||||
myLog("Requesting data for observed property ", observedPropertyFilter)
|
||||
timeFilter <- sosCreateEventTimeList(sosCreateTimePeriod(sos = sos,
|
||||
begin = (Sys.time() - 3600 * offering_hours), end = Sys.time()))
|
||||
myLog("Requesting data for time ", toString(timeFilter))
|
||||
|
||||
# make the request
|
||||
myLog("Send request...")
|
||||
observation <- getObservation(sos = sos,# verbose = TRUE,
|
||||
#inspect = TRUE,
|
||||
observedProperty = observedPropertyFilter,
|
||||
procedure = stationFilter,
|
||||
eventTime = timeFilter,
|
||||
offering = offering)
|
||||
data <- sosResult(observation)
|
||||
# summary(data)
|
||||
# str(data)
|
||||
|
||||
myLog("Request finished!"); myLog(toString(str(data)))
|
||||
|
||||
# create time series ###########################################################
|
||||
timeField <- "SamplingTime"
|
||||
|
||||
valuesIndex <- 3
|
||||
values <- data[[names(data)[[valuesIndex]]]]
|
||||
|
||||
# create time series from data and plot
|
||||
timeSeries <- xts(x = values, order.by = data[[timeField]])
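# xts indexes the values by their sampling times, so the plot below gets a proper date/time axis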
|
||||
|
||||
# calculate regression (local polynomial fitting with loess)
|
||||
regressionValues <- data[[names(data)[[valuesIndex]]]]
|
||||
regressionTime <- as.numeric(data[[timeField]])
|
||||
regression = loess(regressionValues~regressionTime, na.omit(data),
|
||||
span = loess_span)
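# span controls the degree of smoothing: larger values include more neighbouring points in each local fit and give a smoother curve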
|
||||
|
||||
# create plot ##################################################################
|
||||
timeseries_plot <- "output.jpg"
|
||||
jpeg(file = timeseries_plot, width = image_width, height = image_height,
|
||||
units = "px", quality = 90, bg = "#f3f3f3")
|
||||
|
||||
.title <- paste0("Dynamic Time Series Plot for ", toString(stationFilter))
|
||||
p <- plot(timeSeries, main = .title,
|
||||
sub = paste0(toString(unique(data[["feature"]])), "\n", sosUrl(sos), " @ ",
|
||||
toString(Sys.time())),
|
||||
xlab = attr(data[[timeField]], "name"),
|
||||
ylab = paste0(attr(values, "name"),
|
||||
" [", attr(values, "unit of measurement"), "]"),
|
||||
major.ticks = "days")
|
||||
lines(data[[timeField]], regression$fitted, col = 'red', lwd = 3)
|
||||
|
||||
graphics.off()
|
||||
|
||||
myLog("Created image: ", timeseries_plot)
|
||||
myLog("Working directory: ", getwd())
|
||||
|
||||
# wps.out: timeseries_plot, type = jpeg, title = time series plot,
|
||||
# abstract = the output image as a graphic in jpeg format;
|
||||
|
||||
|
||||
# test plot ####################################################################
|
||||
# wps.off;
|
||||
plot(timeSeries, main = "Test plot",
|
||||
sub = paste0(toString(unique(data[["feature"]])), "\n", sosUrl(sos)),
|
||||
xlab = attr(data[[timeField]], "name"),
|
||||
ylab = paste0(attr(values, "name"),
|
||||
" [", attr(values, "unit of measurement"), "]"),
|
||||
major.ticks = "days")
|
||||
lines(data[[timeField]], regression$fitted, col = 'red', lwd = 3)
|
||||
|
||||
# wps.on;
|
|
@ -0,0 +1,12 @@
|
|||
# wps.des: id = demo.uniform.table, title = Random number generator, version = 42,
|
||||
# abstract = Generates random numbers for uniform distribution;
|
||||
|
||||
# wps.in: min, double, Minimum, All outcomes are larger than min, value = 0;
|
||||
# wps.in: max, double, Maximum, All outcomes are smaller than max, value = 1;
|
||||
# wps.in: n, integer, amount of random numbers, value = 100;
|
||||
x = runif(n, min=min, max=max)
|
||||
|
||||
# wps.out: output, text, Random number list,
|
||||
# Text file with list of n random numbers in one column;
|
||||
output = "random_out"
|
||||
write.table(x, output)
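# the output file can be read back into R with read.table(output)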
|
|
@ -0,0 +1,11 @@
|
|||
#
|
||||
# Author: Matthias Hinz
|
||||
###############################################################################
|
||||
|
||||
# wps.des: demo.uniform.simple, title = A Simple WPS Process,
|
||||
# abstract = Example Calculation with R;
|
||||
# wps.in: input, integer;
|
||||
# wps.out: output, double;
|
||||
|
||||
# calculate something... the variable "input" does not have to be initialized here (it is provided by the WPS)
|
||||
output = runif(1)*input
|
|
@ -0,0 +1,166 @@
|
|||
## Function to import enviroCar trajectories
|
||||
################################################################################
|
||||
## Code modified from Edzer Pebesma and Nikolai Korte ##
|
||||
################################################################################
|
||||
|
||||
#
|
||||
# import function for enviroCar data
|
||||
#
|
||||
importEnviroCar = function(file) {
|
||||
require(rjson) # fromJSON
|
||||
require(maptools) # spCbind
|
||||
require(rgdal) #readOGR
|
||||
require(RCurl) #getURL
|
||||
require(stringr) #str_replace_all
|
||||
|
||||
# read data as spatial object:
|
||||
layer = readOGR(getURL(file,ssl.verifypeer = FALSE), layer = "OGRGeoJSON")
|
||||
|
||||
# convert time from text to POSIXct:
|
||||
layer$time = as.POSIXct(layer$time, format="%Y-%m-%dT%H:%M:%SZ", tz = "UTC") # the trailing Z denotes UTC
|
||||
# the third column is JSON, we want it in a table (data.frame) form:
|
||||
# 1. form a list of lists
|
||||
l1 = lapply(as.character(layer[[3]]), fromJSON)
|
||||
# 2. parse the $value elements in the sublist:
|
||||
l2 = lapply(l1,function(x) as.data.frame(lapply(x, function(X) X$value)))
|
||||
# dynamic parsing of phenomenon names and units
|
||||
phenomenonsUrl = "https://www.envirocar.org/api/stable/phenomenons"
|
||||
phenomenons = fromJSON(getURL(phenomenonsUrl,ssl.verifypeer = FALSE))
|
||||
|
||||
colNames <- c("GPS.Bearing", "GPS.HDOP", "GPS.Speed")
|
||||
if (!all(colNames %in% names(l2[[1]])))
|
||||
stop("Trajectory does not contain all the necessary data (GPS.Bearing, GPS.HDOP, GPS.Speed)")
|
||||
else
|
||||
colNames <- names(l2[[1]])
|
||||
|
||||
|
||||
resultMatrix = matrix(nrow = length(l2),ncol = length(colNames))
|
||||
dimnames(resultMatrix)[[2]] = colNames
|
||||
for (i in seq(along = l2))
|
||||
resultMatrix[i,colNames] = as.numeric(l2[[i]])[match(colNames, names(l2[[i]]))]
|
||||
result = as.data.frame(resultMatrix)
|
||||
|
||||
# set the units:
|
||||
units <- sapply(phenomenons[[1]], "[[", "unit")
|
||||
names(units)=colNames
|
||||
|
||||
# add a units attribute to layer
|
||||
layer[[3]] = NULL
|
||||
# add the table as attributes to the spatial object
|
||||
if (length(layer) == nrow(result)) {
|
||||
layer = spCbind(layer, result)
|
||||
attr(layer, "units") = units
|
||||
layer
|
||||
} else
|
||||
NULL
|
||||
}
|
||||
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[enviroCar MM] ", ..., "\n"))
|
||||
}
|
||||
|
||||
|
||||
# process description on localhost:
|
||||
# http://localhost:8080/wps/WebProcessingService?Request=DescribeProcess&service=WPS&version=1.0.0&identifier=org.n52.wps.server.r.enviroCar_osmMatching
|
||||
|
||||
################################################################################
|
||||
# process inputs
|
||||
|
||||
# testdata defined inline
|
||||
# wps.off;
|
||||
|
||||
# wps.des: id = enviroCar_osmMatching,
|
||||
# title = envirocar track to OSM streets matching,
|
||||
# abstract = Match an enviroCar track to the OpenStreetMap network with fuzzy matching algorithm;
|
||||
|
||||
# wps.in: trackId, string, title = trackIdentifier,
|
||||
# abstract = enviroCar track identifier,
|
||||
# minOccurs = 1, maxOccurs = 1;
|
||||
trackId <- "52f3836be4b0d8e8c27ed6f0"
|
||||
|
||||
# wps.in: envirocarApiEndpoint, string, title = enviroCar API,
|
||||
# abstract = enviroCar API endpoint for GET and POST requests,
|
||||
# value = https://envirocar.org/api/stable,
|
||||
# minOccurs = 0, maxOccurs = 1;
|
||||
envirocarApiEndpoint <- "https://envirocar.org/api/stable"
|
||||
|
||||
# wps.in: image_width, type = integer, title = width of the generated image in pixels,
|
||||
# value = 800, minOccurs = 0, maxOccurs = 1;
|
||||
# wps.in: image_height, type = integer, title = height of the generated image in pixels,
|
||||
# value = 500, minOccurs = 0, maxOccurs = 1;
|
||||
image_width = 800;
|
||||
image_height = 500;
|
||||
|
||||
# wps.on;
|
||||
|
||||
myLog("inputs: ", toString(paste(ls())), "")
|
||||
|
||||
################################################################################
|
||||
# process
|
||||
|
||||
myLog("working directory: ", getwd(), "\n")
|
||||
## URL of the trajectory
|
||||
trackUrl = paste0(envirocarApiEndpoint, "/tracks/", trackId)
|
||||
|
||||
myLog("Starting process for ", trackUrl, "\n")
|
||||
|
||||
## Import the trajectory
|
||||
traj = importEnviroCar(trackUrl)
|
||||
|
||||
# install fuzzyMM package from source file beforehand!
|
||||
require(fuzzyMM)
|
||||
|
||||
## Do the map matching
|
||||
matched_traj <- mm(traj, plot = FALSE)
|
||||
|
||||
# wps.off;
|
||||
str(matched_traj)
|
||||
# wps.on;
|
||||
|
||||
myLog("DONE! environment objects: ", toString(paste(ls())), "\n")
|
||||
|
||||
################################################################################
|
||||
# process outputs
|
||||
|
||||
# wps.out: matched_traj_data, type = rdata,
|
||||
# title = the trajectories,
|
||||
# abstract = the matched and original trajectory as RData;
|
||||
matched_traj_data <- paste0("matched_traj_", trackId, ".RData")
|
||||
save(traj, matched_traj, file = matched_traj_data)
|
||||
myLog("Saved matched track data: ", getwd(), "/", matched_traj_data)
|
||||
|
||||
## wps.out: matched_traj_shp, type = shp_x,
|
||||
## title = matched trajectory as SHP,
|
||||
## abstract = the matched trajectory as a zipped shapefile;
|
||||
#matched_traj_shp <- paste0("matched_traj_shp_", trackId)
|
||||
#writeOGR(matched_traj, getwd(), matched_traj_shp, driver="ESRI Shapefile")
|
||||
#myLog("Saved matched track shapefile: ", getwd(), "/", matched_traj_shp)
|
||||
|
||||
# wps.out: orig_traj_json, type = text,
|
||||
# title = original trajectory as GeoJSON,
|
||||
# abstract = the original trajectory in JavaScript Object Notation (JSON);
|
||||
orig_traj_json <- paste0("orig_traj_", trackId, ".json")
|
||||
writeOGR(traj, orig_traj_json, "traj", driver='GeoJSON')
|
||||
myLog("Saved original track GeoJSON: ", getwd(), "/", orig_traj_json)
|
||||
|
||||
# wps.out: matched_traj_json, type = text,
|
||||
# title = matched trajectory as GeoJSON,
|
||||
# abstract = the matched trajectory in JavaScript Object Notation (JSON);
|
||||
matched_traj_json <- paste0("matched_traj_", trackId, ".json")
|
||||
writeOGR(matched_traj, matched_traj_json, "matched_traj", driver='GeoJSON')
|
||||
myLog("Saved matched track GeoJSON: ", getwd(), "/", matched_traj_json)
|
||||
|
||||
# wps.out: output_image, type = image/png, title = The output plot,
|
||||
# abstract = On-the-fly generated plot showing the matched points and streets;
|
||||
output_image <- "output.png"
|
||||
png(file = output_image, width = image_width, height = image_height,
|
||||
units = "px")
|
||||
plot(traj$coords.x1, traj$coords.x2, pch = 16, col = "blue",
|
||||
xlab = "longitude", ylab = "latitude")
|
||||
title(main = paste0("Matched track for ", trackId), sub = trackUrl)
|
||||
points(matched_traj$coords.x1, matched_traj$coords.x2,pch = 16, col = "red")
|
||||
roads <- create_drn(bbox(traj))
|
||||
lines(roads@sl)
|
||||
graphics.off()
|
||||
|
||||
myLog("Created image: ", getwd(), output_image)
|
|
@ -0,0 +1,123 @@
|
|||
# Copyright (C) 2013 by 52°North Initiative for Geospatial Open Source Software GmbH, Contact: info@52north.org
|
||||
# Author: Daniel Nuest (d.nuest@52north.org)
|
||||
|
||||
#wps.des: eo2hAirQuality, creates a coverage with interpolated air quality parameters from a SOS;
|
||||
|
||||
##################### dependencies #############################################
|
||||
library(maptools)
|
||||
library(rgdal)
|
||||
library(raster)
|
||||
library(fields)
|
||||
library(sos4R)
|
||||
library(stringr)
|
||||
|
||||
##################### helper functions #########################################
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[eo2h aqsax] ", ..., "\n"))
|
||||
}
|
||||
|
||||
###################### resources ###############################################
|
||||
# this resource has the required functions and data
|
||||
# wps.res: EO2H/AirQualityMapping.RData;
|
||||
|
||||
###################### manual testing ##########################################
|
||||
# wps.off;
|
||||
# full data file from wiki (too large for repo):
|
||||
download.file(url = "https://wiki.52north.org/pub/Geostatistics/WPS4R/AirQualityMapping-full.RData", destfile = "AirQualityMapping.RData")
|
||||
in_sos_url <- "http://141.30.100.135:8080/eo2heavenSOS/sos"
|
||||
in_time <- "2012-02-02"
|
||||
in_offering_id <- "o3"
|
||||
in_observed_prop <- "http://www.eo2heaven.org/classifier/parameter/daily_average/O3"
|
||||
in_stations <- "DESN019,DESN004,DESN014,DESN017,DESN001,DESN059,DESN053,DESN011,DESN052,DESN045,DESN051,DESN050,DESN049,DESN012,DESN024,DESN082,DESN080,DESN081,DESN085,DESN074,DESN079,DESN061,DESN076"
|
||||
# wps.on;
|
||||
|
||||
# FIXED: resource loading now works!
|
||||
load("AirQualityMapping.RData")
|
||||
|
||||
myLog("wd content:")
|
||||
myLog(ls())
|
||||
myLog("list.weights:")
|
||||
myLog(summary(list.weights))
|
||||
|
||||
###################### input definition ########################################
|
||||
|
||||
# wps.in: in_sos_url, string, title = SOS service URL,
|
||||
# abstract = SOS URL endpoint,
|
||||
# value = http://141.30.100.135:8080/eo2heavenSOS/sos,
|
||||
# minOccurs = 0, maxOccurs = 1;
|
||||
|
||||
# wps.in: in_offering_id, type = string, title = identifier for the used offering,
|
||||
# value = O3,
|
||||
# minOccurs = 0, maxOccurs = 1;
|
||||
|
||||
# wps.in: in_observed_prop, type = string, title = identifier for the observed property to request,
|
||||
# value = http://www.eo2heaven.org/classifier/parameter/daily_average/O3,
|
||||
# minOccurs = 0, maxOccurs = 1;
|
||||
|
||||
# wps.in: in_stations, type = string, title = a comma-separated list of stations,
|
||||
# minOccurs = 1, maxOccurs = 1;
|
||||
|
||||
# wps.in: in_time, type = string, title = time for TM_Equals filter,
|
||||
# minOccurs = 1, maxOccurs = 1;
|
||||
|
||||
#################### make sos request (based on wmsConfig.xml) #################
|
||||
sos <- SOS(url = in_sos_url)
|
||||
|
||||
# FIXME 'eventTime' does not compute, error message:
|
||||
# Error in sosCreateTime(sos = sos, time = in_time, operator = "TM_Equals") :
|
||||
# object '.l' not found
|
||||
#eventTime <- sosCreateTime(sos = sos, time = in_time, operator = "TM_Equals")
|
||||
myLog(" time: ", in_time)
|
||||
|
||||
responseFormat <- "text/xml;subtype="om/1.0.0""
|
||||
|
||||
chr.pollutant <- in_offering_id
|
||||
myLog("pollutant: ", chr.pollutant)
|
||||
|
||||
stations <- str_split(string = in_stations, pattern = ",", )[[1]]
|
||||
vector.stations <- str_trim(stations)
|
||||
myLog("stations: (", length(vector.stations), "): ",
|
||||
toString(vector.stations))
|
||||
|
||||
################### parse the sos request ######################################
|
||||
# TODO
|
||||
in_measurements <- "10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32"
|
||||
measurements <- str_split(string = in_measurements, pattern = ",", )[[1]]
|
||||
vector.measurements <- str_trim(measurements)
|
||||
myLog("measurements (", length(vector.measurements), "): ",
|
||||
toString(vector.measurements))
|
||||
|
||||
################### calculate concentrations ###################################
|
||||
#
|
||||
# change function to return the output object from writeRaster
|
||||
#
|
||||
getPollutantConcentrationAsGeoTiff <- function(.vector.stations,
|
||||
.vector.measurements, .chr.pollutant, .chr.file) {
|
||||
#calculate raster
|
||||
.raster.result <- function.getPollutantConcentration(.vector.stations,
|
||||
.vector.measurements, .chr.pollutant)
|
||||
|
||||
#write result raster
|
||||
.x <- writeRaster(.raster.result, filename=.chr.file, format="GTiff",
|
||||
overwrite=TRUE)
|
||||
return(.x)
|
||||
}
|
||||
|
||||
##################### calculate the coverage > output ##########################
|
||||
|
||||
# FIXME use the function from the .RData file > extend this to a demo of how to
|
||||
# include functions that are part of neither the script nor a package on CRAN
|
||||
|
||||
#function.getPollutantConentrationAsGeoTiff(vector.stations, vector.measurements,
|
||||
# chr.pollutant, chr.file);
|
||||
output.file <- "saxony_output.tif";
|
||||
output.img <- getPollutantConcentrationAsGeoTiff(vector.stations, as.numeric(vector.measurements),
|
||||
chr.pollutant, output.file);
|
||||
##wps.out: output, geotiff;
|
||||
myLog("Done:")
|
||||
myLog(capture.output(output.img))
|
||||
|
||||
output <- paste0(getwd(), "/", output.file)
|
||||
myLog("Output file:")
|
||||
myLog(output)
|
||||
#wps.out: output, geotiff;
|
|
@ -0,0 +1,20 @@
|
|||
library(sp); library(maptools)
|
||||
# wps.des: id = geo.poly.attribute-sum, title = Sum of attributes for Polygons,
|
||||
# abstract = Calculates the sum of a numeric attribute variable for given Polygon files
|
||||
# given by execute request: variables with identifiers "data" and "attributename";
|
||||
|
||||
# wps.in: data, application/x-zipped-shp;
|
||||
# wps.in: attributename, string;
|
||||
input=readShapePoly(data)
|
||||
sum = sum(input@data[attributename])
|
||||
|
||||
#other output functions may be:
|
||||
#mean = mean(input@data[attributename])
|
||||
#median = median((input@data[attribute])[!is.na(input@data[attributname])])
|
||||
#max = max(input@data[attributename])
|
||||
#min = min(input@data[attributename])
|
||||
#quList = quantile(input@data[attributename], probs = seq(0, 1, 0.25), na.rm=T)
|
||||
|
||||
#output variable - shall be always named "result":
|
||||
result = sum
|
||||
# wps.out: result, double;
|
|
@ -0,0 +1,19 @@
|
|||
# Author: Matthias Hinz
|
||||
###############################################################################
|
||||
|
||||
# wps.des: geo.poly.intersection;
|
||||
# wps.in: r1, shp_x, Polygon1;
|
||||
# wps.in: r2, shp_x, Polygon2;
|
||||
library(rgeos); library(maptools); library(rgdal);
|
||||
|
||||
poly1 = readShapePoly(r1)
|
||||
poly2 = readShapePoly(r2)
|
||||
|
||||
polyint = gIntersection(poly1,poly2)
|
||||
|
||||
poly = as(polyint,"SpatialPolygonsDataFrame")
|
||||
|
||||
out="out.shp"
|
||||
writeOGR(poly,out,"data","ESRI Shapefile")
|
||||
|
||||
# wps.out: out, shp_x, Intersection Polygon;
|
|
@ -0,0 +1,42 @@
|
|||
# wps.des: highlight, "Transforms an R script into HTML/CSS with syntax highlights using the highlight package";
|
||||
|
||||
# wps.in: rcodeurl, type = string, title = code location,
|
||||
# abstract = "URL to the R code to highlight"
|
||||
# minOccurs = 1, maxOccurs = 1;
|
||||
|
||||
library(highlight)
|
||||
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[highlight] ", Sys.time(), " > ", ..., "\n"))
|
||||
}
|
||||
|
||||
myLog("Start script... ")
|
||||
|
||||
tmpfile <- "rcode.txt"
|
||||
|
||||
#wps.off;
|
||||
rcodeurl <- "http://localhost:8080/wps/R/scripts/test_calculator.R"
|
||||
#wps.on;
|
||||
download.file(url = rcodeurl, destfile = tmpfile)
|
||||
|
||||
myLog("Downloaded script file to ", tmpfile, " in ", getwd())
|
||||
|
||||
# wps.off;
|
||||
rcode <- 'highlight(code = rcode, format = "html",
|
||||
output = output, detective = simple_detective,
|
||||
renderer = renderer_html( document = TRUE ),
|
||||
parser.output = parser(input, encoding = "UTF-8"))'
|
||||
write(rcode, file = tmpfile)
|
||||
# wps.on;
|
||||
|
||||
myLog("Saved code to file ", tmpfile, " in ", getwd())
|
||||
|
||||
html <- "rcode.html"
|
||||
h <- highlight(file = tmpfile, output = html, format = "html",
|
||||
detective = simple_detective,
|
||||
renderer = renderer_html( document = TRUE ),
|
||||
parser.output = parser(input, encoding = "UTF-8"))
|
||||
myLog("Saved to file ", html, " in ", getwd())
|
||||
|
||||
# wps.out: html, type = text/html,
|
||||
# abstract = "highlighted html code";
|
|
@ -0,0 +1,43 @@
|
|||
# wps.des: netCDF_aggregation, Process for aggregation of netCDF file data;
|
||||
# wps.in: file, netcdf_x, File to be aggregated;
|
||||
# wps.out: output, netcdf_x, Aggregated output file;
|
||||
|
||||
# change to your needs, preferably to a local copy. However, writing to NetCDF is disabled by default.
|
||||
#file <- "D:/Tomcat6/apache-tomcat-6.0.32/temp/ncInput1319114818439.nc"
|
||||
|
||||
#"//ifgifiles/projekte/UncertWeb/WP 3 Tools/Spatio-temporal Aggregation/Data/biotemperature_normalDistr.nc"
|
||||
|
||||
# assumed imports (the script relies on them; readUNetCDF / writeUNetCDF are
# expected from the UncertWeb helper environment and are not on CRAN):
library(RNetCDF) # open.nc, print.nc
library(sp)      # spplot, GridTopology, SpatialGrid, SpatialPixels

uncdf <- open.nc(file, write = FALSE)
|
||||
print.nc(uncdf) # prints the file structure to the console
|
||||
|
||||
|
||||
spUNetCDF <- readUNetCDF(file, variables=c("biotemperature_variance"))
|
||||
|
||||
str(spUNetCDF)
|
||||
|
||||
spplot(spUNetCDF,col.regions=rev(heat.colors(100)))
|
||||
|
||||
# define new grid
|
||||
|
||||
scale <- 2 # factor of rescaling
|
||||
newCellsize <- scale*spUNetCDF@grid@cellsize # rescaling the cell size
|
||||
newCellcentre.offset <- spUNetCDF@bbox[,1]+newCellsize/2 # min of bbox + 1/2 new cellsize -> lower-left cellcentre
|
||||
newDim <- ceiling(c(diff(spUNetCDF@bbox[1,])/newCellsize[1], diff(spUNetCDF@bbox[2,])/newCellsize[2])) # calculating the new dimensions. The new grid will most likely extend the old grid on the top-right corner
|
||||
|
||||
gridTopo <- GridTopology(cellcentre.offset=newCellcentre.offset, cellsize=newCellsize, cells.dim=newDim)
|
||||
newGrid <- SpatialGrid(gridTopo, proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"))
|
||||
str(newGrid)
|
||||
|
||||
newGrid@bbox
|
||||
spUNetCDF@bbox # is contained in the new bbox
|
||||
|
||||
newPixels <- as(newGrid,"SpatialPixels") # there seems to be a problem with grids, works fine for SpatialPixels
|
||||
str(newPixels)
|
||||
|
||||
spAgg <- aggregate.Spatial(spUNetCDF,newPixels,mean)
|
||||
|
||||
str(spAgg)
|
||||
spplot(spAgg)
|
||||
|
||||
writeUNetCDF(newfile="newUNetCDF.nc", spAgg)
|
||||
output = "newUNetCDF.nc"
|
|
@ -0,0 +1,19 @@
|
|||
# wps.des: test.calculator, process for misusing R as a calculator;
|
||||
|
||||
# wps.in: a, double, value=1;
|
||||
# wps.in: b, double, value=1;
|
||||
# wps.in: op, string, value=+;
|
||||
|
||||
# wps.off;
|
||||
op <- "+"
|
||||
a <- 23
|
||||
b <- 42
|
||||
# wps.on;
|
||||
|
||||
result <- do.call(op, list(a, b))
|
||||
|
||||
#wps.out: result, double, calculation result;
|
||||
|
||||
# wps.off;
|
||||
result
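# a minimal sketch, not part of the process itself: do.call() resolves the
# operator name to the matching base function, so other binary operators work too
do.call("*", list(6, 7))   # 42
do.call("^", list(2, 10))  # 1024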
|
||||
# wps.on;
|
|
@ -0,0 +1,30 @@
|
|||
###############################################################################
|
||||
#wps.des: id = test.csv, title = Test script for csv output,
|
||||
# abstract = returns the data of the Meuse test dataset as csv;
|
||||
|
||||
#wps.in: id = filename, title = file name for the output, abstract = dummy
|
||||
# variable because we need input - will be prepended to the generated files,
|
||||
# type = string, value = test_geo, minOccurs = 0, maxOccurs = 1;
|
||||
|
||||
# wps.off;
|
||||
filename <- "test_csv_"
|
||||
setwd(tempdir())
|
||||
# wps.on;
|
||||
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[test.csv] ", Sys.time(), " > ", ..., "\n"))
|
||||
}
|
||||
myLog("Start script... wd: ", getwd())
|
||||
|
||||
library("sp")
|
||||
|
||||
# load data
|
||||
data("meuse")
|
||||
coordinates(meuse) <- ~ x+y
|
||||
|
||||
data <- paste0(filename, "meuse.csv")
|
||||
write.csv(x = meuse@data, file = data)
|
||||
#wps.out: id = data, type = text/csv, title = meuse data;
|
||||
|
||||
myLog("Done, save csv to file ", data, " in ", getwd())
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
# wps.des: id = test.defaults, title = dummy process,
|
||||
# abstract = test process for default value annotations;
|
||||
|
||||
# wps.in: id = a, type = integer, value = 4;
|
||||
# wps.in: id = b, type = double, value = 2.5;
|
||||
# wps.in: id = c, type = double, value = 32;
|
||||
|
||||
# wps.in: id = z, type = boolean, value = true;
|
||||
|
||||
# wps.in: id = y, type = string;
|
||||
|
||||
if(z == TRUE && is.na(y)) {
|
||||
out <- (a * b) + c
|
||||
}
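# worked example, assuming the annotation defaults are injected as declared
# (a = 4, b = 2.5, c = 32, z = true) and y arrives as NA when not supplied:
# out = (4 * 2.5) + 32 = 42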
|
||||
|
||||
#wps.out: id = out, type = integer, title = sum of inputs;
|
|
@ -0,0 +1,26 @@
|
|||
# wps.des: test.echo, title = dummy echo process, abstract = you get what you put in;
|
||||
|
||||
# wps.in: id = inputVariable, type = string, title = input variable, minOccurs = 1, maxOccurs = 1;
|
||||
|
||||
# wps.off;
|
||||
inputVariable <- "The quick brown fox jumps over the lazy dog"
|
||||
# wps.on;
|
||||
|
||||
# test that the renaming measures do not affect the script
|
||||
quitter <- inputVariable
|
||||
|
||||
uuunlinkkk <- quitter
|
||||
evaluator <- uuunlinkkk
|
||||
qevalq <- evaluator
|
||||
systemo <- qevalq
|
||||
setwdsetwdsetwd <- systemo
|
||||
|
||||
outputVariable <- setwdsetwdsetwd
|
||||
|
||||
if(inputVariable == "Hallo Echo!")
|
||||
outputVariable <- "Hallo Otto!"
|
||||
|
||||
#wps.out: id = outputVariable, type = string, title = returning input variable;
|
||||
|
||||
# wps.off;
|
||||
outputVariable
|
|
@ -0,0 +1,56 @@
|
|||
###############################################################################
|
||||
#wps.des: id = test.geo, title = Test script for geospatial data output,
|
||||
# abstract = loads the Meuse test dataset and returns it as shapefile and
|
||||
# GeoTIFF;
|
||||
|
||||
#wps.in: id = filename, title = file name for the output, abstract = dummy
|
||||
# variable because we need input - will be prepended to the generated files,
|
||||
# type = string, value = test_geo, minOccurs = 0, maxOccurs = 1;
|
||||
|
||||
# wps.off;
|
||||
filename <- "test_geo"
|
||||
setwd("D:/TEMP")
|
||||
# wps.on;
|
||||
|
||||
myLog <- function(...) {
|
||||
cat(paste0("[test.geo] ", Sys.time(), " > ", ..., "\n"))
|
||||
}
|
||||
myLog("Start script... wd: ", getwd())
|
||||
|
||||
library("sp")
|
||||
library("rgdal")
|
||||
|
||||
# load data
|
||||
data("meuse")
|
||||
coordinates(meuse) <- ~ x+y
|
||||
|
||||
###############################################################################
|
||||
# shapefile output
|
||||
# http://spatial-analyst.net/book/system/files/GstatIntro.pdf
|
||||
writeOGR(meuse, ".", "meuse", "ESRI Shapefile")
|
||||
meuse_vector <- "meuse.shp"
|
||||
#wps.out: id = meuse_vector, type = application/x-zipped-shp, title = shapefile
|
||||
# of the meuse dataset;
|
||||
|
||||
myLog("Wrote shapefile meuse.shp")
|
||||
|
||||
###############################################################################
|
||||
# raster output
|
||||
data(meuse.grid)
|
||||
coordinates(meuse.grid) <- ~x+y
|
||||
proj4string(meuse.grid) <- CRS("+init=epsg:28992")
|
||||
gridded(meuse.grid) <- TRUE
|
||||
#spplot(meuse.grid)
|
||||
|
||||
raster_filename <- paste0(filename, "_raster.tif")
|
||||
meuse_raster <- writeGDAL(meuse.grid["dist"], fn = raster_filename, drivername = "GTiff")
|
||||
#meuse_raster <- paste(getwd(), raster, sep="/")
|
||||
#wps.out: id = meuse_raster, type = geotiff, title = gridded meuse dataset,
|
||||
# abstract = gridded meuse dataset (variable 'dist') in GeoTIFF format;
|
||||
|
||||
myLog("Wrote raster ", raster_filename)
|
||||
|
||||
meuse_summary <- "meuse_summary.txt"
|
||||
capture.output(summary(meuse), file = meuse_summary)
|
||||
#wps.out: id = meuse_summary, type = text, title = statistical summary of the
|
||||
# dataset;
|
|
@ -0,0 +1,22 @@
|
|||
# wps.des: test.image, title = demo image process generating a plot of the Meuse dataset;
|
||||
|
||||
# wps.in: size, integer, title = image size,
|
||||
# abstract = the horizontal and vertical size of the image in pixels,
|
||||
# value = 500;
|
||||
|
||||
#wps.off;
|
||||
size <- 420
|
||||
setwd(tempdir())
|
||||
getwd()
|
||||
#wps.on;
|
||||
|
||||
image <- "output.png"
|
||||
png(file = image, width = size, height = size)
|
||||
x <- c(1,2,3,4)
|
||||
y <- c(1,7,4,2)
|
||||
plot(x, y, main = "WPS4R test plot", sub = toString(Sys.time()))
|
||||
|
||||
graphics.off()
|
||||
cat("Saved image ", image, " in ", getwd())
|
||||
|
||||
# wps.out: id = image, type = png, title = a simple plot;
|
|
@ -0,0 +1,53 @@
|
|||
# wps.des: test.resources, "Resources-Tester", abstract="A test script to demonstrate how resources are handled by wps4r", author = "Matthias Hinz";
|
||||
# wps.in: inputDummy, string, title = "Input-Dummy",
|
||||
# abstract = unused input value,
|
||||
# value = "Dummy input value";
|
||||
|
||||
#wps.resource: test/dummy1.txt, test/dummy2.png;
|
||||
library(rgdal)
|
||||
|
||||
raster = readGDAL("dummy2.png")
|
||||
textResourceContent = readLines("dummy1.txt", warn=F)
|
||||
imageResourceWidth = bbox(raster)["x","max"]
|
||||
|
||||
warning("This process is only for testing purposes and contains no valid output")
|
||||
|
||||
# wps.out: textResourceContent, string, "Dummy-Output", "Content of the dummy-txt file";
|
||||
# wps.out: imageResourceWidth, integer, "Dummy-Output", "Width of the test resource image in px (480)";
|
||||
|
||||
if(!is.element("dummy1.txt", list.files(getwd())))
|
||||
warn("File resources directory was not correctly copied")
|
||||
|
||||
###############################################################################
|
||||
# directory as resource
|
||||
#wps.resource: test/dir;
|
||||
|
||||
subdirSize <- length(list.dirs(recursive = FALSE))
|
||||
# wps.out: subdirSize, integer, "Dummy-Output",
|
||||
# "The number of directories in the test directory (1)";
|
||||
|
||||
directoryResourceDir <- "dir"
|
||||
recursiveSubdirSize <- length(list.dirs(path = directoryResourceDir))
|
||||
# wps.out: recursiveSubdirSize, integer, "Dummy-Output",
|
||||
# "The number of directories recursively counted in the test directory (3)";
|
||||
|
||||
if(!is.element("dummy2.txt", list.files(directoryResourceDir)))
|
||||
warn("File from directory in resources directory was not copied to wd subdir")
|
||||
|
||||
directoryResourceContentSize <- length(list.files(path = directoryResourceDir,
|
||||
pattern = "dummy"))
|
||||
# wps.out: directoryResourceContentSize, integer, "Dummy-Output",
|
||||
# "The number of files in the test directory (1)";
|
||||
|
||||
subdirTextContent <- as.double(
|
||||
read.table(paste0(directoryResourceDir, "/dummy2.txt"))[1,1])
|
||||
# wps.out: subdirTextContent, double, "Dummy-Output",
|
||||
# "The number in the dummy file in the test directory (42)";
|
||||
|
||||
subdirSubfolderTextContent <- as.double(
|
||||
read.table(paste0(directoryResourceDir, "/folder/subfolder/dummy3.txt"))[1,1])
|
||||
# wps.out: subdirSubfolderTextContent, integer, "Dummy-Output",
|
||||
# "The number in the dummy file in the test directory (17)";
|
||||
|
||||
if(is.element("dummy2.txt", list.files(getwd())))
|
||||
warn("File from directory in resources directory was incorrectly copied to base wd")
|
|
@ -0,0 +1,9 @@
|
|||
# wps.des: test.warnings, "Warnings-Tester", abstract="A test script to demonstrate how warnings are derived from R", author = "Matthias Hinz";
|
||||
# wps.in: inputDummy, string, "Input-Dummy", value="Dummy input value";
|
||||
warning("Test warning 1 ...")
|
||||
warning("Test warning 2: This is a warning with some more text.")
|
||||
warning("This process is only for testing purposes and contains no valid output.")
|
||||
warning("Test warning 4: This is the LAST warning.")
|
||||
|
||||
dummyOutput = paste0("Dummy output value ", inputDummy)
|
||||
# wps.out: dummyOutput, string, "Dummy-Output";
|
|
@ -0,0 +1,19 @@
|
|||
# wps.des: test.wpsOff, dummy process for testing wps.off annotations;
|
||||
|
||||
# wps.off;
|
||||
a = 1
|
||||
b = 2
|
||||
c = 3
|
||||
# wps.on;
|
||||
|
||||
# wps.in: id = a, type = integer, minOccurs = 1, maxOccurs = 1;
|
||||
# wps.in: id = b, type = integer, minOccurs = 1, maxOccurs = 1;
|
||||
# wps.in: id = c, type = integer, minOccurs = 1, maxOccurs = 1;
|
||||
|
||||
out <- a + b + c
|
||||
|
||||
#wps.off;
|
||||
out <- 17
|
||||
#wps.on;
|
||||
|
||||
#wps.out: id = out, type = integer, title = sum of inputs;
|
|
@ -0,0 +1,28 @@
|
|||
#wps.des: id = test.session, title = Test script for session variables,
|
||||
# abstract = simply returns the session variables that should have been loaded
|
||||
# by the WPS into the R session;
|
||||
|
||||
#wps.resource: test/dummy1.txt, test/dummy2.png;
|
||||
|
||||
#wps.in: id = dummy, title = dummy input, abstract = dummy input - not used,
|
||||
# type = string, value = 52N, minOccurs = 0, maxOccurs = 1;
|
||||
|
||||
wps <- wpsServer
|
||||
#wps.out: wps, type = boolean, title = server flag,
|
||||
# abstract = a flag that is true if the process is executed within a WPS server;
|
||||
|
||||
processdescription <- wpsProcessDescription
|
||||
#wps.out: processdescription, type = string, title = process description,
|
||||
# abstract = the link to the process description of this process;
|
||||
|
||||
servername <- wpsServerName
|
||||
#wps.out: servername, type = string, title = server name,
|
||||
# abstract = a name for the executing server of this process;
|
||||
|
||||
resourceurl <- wpsResourceURL
|
||||
#wps.out: resourceurl, type = string, title = resource base url,
|
||||
# abstract = the base URL to access the resources of this process;
|
||||
|
||||
resources <- toString(wpsScriptResources)
|
||||
#wps.out: resources, type = string, title = list of resources,
|
||||
# abstract = a string listing the resources of this process;
|
|
@ -0,0 +1,71 @@
|
|||
library(rgdal)
|
||||
library(automap)
|
||||
|
||||
#wps.des: uncertweb.make-realizations, title = Realization process UncertWeb,
|
||||
# abstract = Makes realizations out of two geotiffs;
|
||||
#wps.in: spdf, geotiff;
|
||||
#wps.in: uspdf, geotiff;
|
||||
|
||||
# import input:
|
||||
spdf = readGDAL(spdf)
|
||||
uspdf = readGDAL(uspdf)
|
||||
|
||||
makeRealizations = function(spdf, uspdf, nsim) {
|
||||
# if variable name is null
|
||||
if (is.null(names(spdf))) names(spdf) = "var"
|
||||
sppdf = spdf
|
||||
# make SpatialPointsDataFrame from SpatialGridDataFrame
|
||||
gridded(sppdf) = FALSE
|
||||
# number of cells in the raster
|
||||
nall = dim(sppdf)[1]
|
||||
|
||||
##### sample 3000 cells from all cells #####
|
||||
# random non-spatial sample
|
||||
spsdf = sppdf[sample(1:nall, 3000),]
|
||||
# centering and scaling of pixel values
|
||||
# centering: subtract overall average from all pixels
|
||||
# scaling: dividing all pixels by overall standard deviation
|
||||
spsdf@data = as.data.frame(scale(spsdf@data))
|
||||
|
||||
##### fit variogram to cell sample #####
|
||||
# variogram for centered and scaled data from sample
|
||||
vario = autofitVariogram(as.formula(paste(names(spsdf), "~1")), spsdf)$var_model
|
||||
rang = vario$range[2]
|
||||
# define number of new samples by range and grid area (minimum 5)
|
||||
nsamp = as.integer(areaSpatialGrid(spdf)/(rang*rang)) + 5
|
||||
# slocs = sppdf[sample(1:nall, nsamp),]
|
||||
|
||||
##### new spatial sample from cell sample #####
|
||||
# random spatial sample
|
||||
slocs = spsample(spsdf, nsamp, "random")
|
||||
# get cell size in x and y direction from the original grid
|
||||
gp = gridparameters(spdf)
|
||||
dx = gp$cellsize[1]
|
||||
dy = gp$cellsize[2]
|
||||
# shift coordinates by a random factor (-0.5 to 0.5) times the cell size
|
||||
slocs@coords = slocs@coords +
|
||||
matrix( runif(nsamp*2) -0.5, ncol = 2)*matrix(c(rep(dx,nsamp),rep(dy,nsamp)),ncol=2)
|
||||
# build spatial points dataframe from the sample with the new coordinates and values of 0
|
||||
# this will serve as prediction locations
|
||||
slocs = SpatialPointsDataFrame(slocs, data = data.frame(dat = rep(0,nsamp)))
|
||||
|
||||
##### Conditional simulation using the new sample locations #####
|
||||
# creates a new grid based on the original grid with nsim simulations for residuals (mean=0)
|
||||
sims = krige(as.formula(paste(names(slocs), "~1")), slocs, spdf, vario, nsim = nsim, nmax = 8)
|
||||
sims2 = sims
|
||||
#sims2@data = spdf@data + sims@data * uspdf@data
|
||||
# recalculate final results by multiplying scaled simulations with uncertainty grid and adding to mean grid
|
||||
sims2@data = as.data.frame(apply(sims@data, 2, rescale))
|
||||
return(sims2)
|
||||
}
|
||||
|
||||
# helper function to recalculate values from residual simulations
|
||||
rescale = function(df1){
|
||||
df = spdf@data+df1*uspdf@data
|
||||
return(df)
|
||||
}
|
||||
|
||||
nsims <- makeRealizations(spdf, uspdf, nsim = 10)
|
||||
|
||||
output = writeGDAL(nsims, "output.tif", driver="GTiff")
|
||||
#wps.out: output, geotiff;
|
|
@ -0,0 +1,40 @@
|
|||
# Function to unzip input files and rename them in R
|
||||
# Files which are not zipped are simply renamed
|
||||
#
|
||||
# Author: Matthias Hinz
|
||||
###############################################################################
|
||||
|
||||
unzipRename = function(file, name, ext){
|
||||
t=unzip(file)
|
||||
baseFileName = paste(name,ext,sep=".")
|
||||
|
||||
if(length(t)==0){
|
||||
file.rename(file, baseFileName)
|
||||
return(baseFileName)
|
||||
}
|
||||
|
||||
for(i in t){
|
||||
suffix = gsub("./","",i)
|
||||
suffix = unlist(strsplit(suffix, "\\."))
|
||||
if(length(suffix)>1){
|
||||
suffix = suffix[length(suffix)]
|
||||
suffix = paste(".",suffix,sep="")
|
||||
}else suffix = ""
|
||||
newName = paste(name,suffix,sep="")
|
||||
file.rename(i, newName)
|
||||
}
|
||||
|
||||
return(baseFileName)
|
||||
}
|
||||
|
||||
zipShp = function(file){
|
||||
base = unlist(strsplit(file,"\\."))[1]
|
||||
shx = paste(base,"shx", sep=".")
|
||||
dbf = paste(base,"dbf", sep=".")
|
||||
prj = paste(base,"prj", sep=".")
|
||||
zip = paste(base,"zip", sep=".")
|
||||
zip(zip, c(file,shx,dbf,prj))
|
||||
if(zip %in% dir())
|
||||
return(zip)
|
||||
else return(NULL)
|
||||
}
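# usage sketch (file names are hypothetical, for illustration only):
#   unzipRename("input123.zip", "mydata", "shp") unpacks the archive, renames
#   each member to "mydata" plus its original extension, and returns "mydata.shp";
#   zipShp("mydata.shp") then bundles mydata.shp/.shx/.dbf/.prj into "mydata.zip"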
|
|
@ -0,0 +1,102 @@
|
|||
# Copyright (C) 2014 52°North Initiative for Geospatial Open Source
|
||||
# Software GmbH
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 as published
|
||||
# by the Free Software Foundation.
|
||||
#
|
||||
# If the program is linked with libraries which are licensed under one of
|
||||
# the following licenses, the combination of the program with the linked
|
||||
# library is not considered a "derivative work" of the program:
|
||||
#
|
||||
# • Apache License, version 2.0
|
||||
# • Apache Software License, version 1.0
|
||||
# • GNU Lesser General Public License, version 3
|
||||
# • Mozilla Public License, versions 1.0, 1.1 and 2.0
|
||||
# • Common Development and Distribution License (CDDL), version 1.0
|
||||
#
|
||||
# Therefore the distribution of the program linked with libraries licensed
|
||||
# under the aforementioned licenses, is permitted by the copyright holders
|
||||
# if the distribution is compliant with both the GNU General Public
|
||||
# License version 2 and the aforementioned licenses.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful, but
|
||||
# WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
|
||||
# Public License for more details.
|
||||
###############################################################################
|
||||
|
||||
wpsProgressVariable <- "wpsProgress"
|
||||
wpsProgress <- 0
|
||||
wpsProgressRange <- c(0, 100)
|
||||
|
||||
wpsProgressLogging <- TRUE
|
||||
|
||||
wpsProgressEnv <- new.env()
|
||||
|
||||
isWpsProgressLogSupported <- function() {
|
||||
return(exists(wpsProgressVariable));
|
||||
}
|
||||
|
||||
wpsProgressLog <- function(...) {
|
||||
if(wpsProgressLogging) cat("[wps progress]", toString(Sys.time()), " > ",
|
||||
..., "\n")
|
||||
}
|
||||
|
||||
wpsSetProgress <- function(progress) {
|
||||
assign(wpsProgressVariable, progress, envir = wpsProgressEnv)
|
||||
wpsProgressLog("set to ", progress)
|
||||
}
|
||||
|
||||
wpsGetProgress <- function() {
|
||||
.p <- get(wpsProgressVariable, envir = wpsProgressEnv)
|
||||
return(.p)
|
||||
}
|
||||
|
||||
wpsIncreaseProgress <- function(increase = 1) {
|
||||
.p <- wpsGetProgress()
|
||||
.p <- .p + increase
|
||||
wpsSetProgress(.p)
|
||||
return(.p)
|
||||
}
|
||||
|
||||
wpsResetProgress <- function() {
|
||||
wpsSetProgress(wpsProgressRange[1])
|
||||
return(wpsGetProgress())
|
||||
}
|
||||
|
||||
wpsGetProgressPercentage <- function() {
|
||||
.p <- wpsGetProgress() / wpsProgressRange[2]
|
||||
return(.p)
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# testing
|
||||
|
||||
# wps.off;
|
||||
is.environment(wpsProgressEnv)
|
||||
|
||||
wpsResetProgress()
|
||||
|
||||
wpsSetProgress(50)
|
||||
wpsGetProgress()
|
||||
wpsGetProgressPercentage()
|
||||
|
||||
wpsSetProgress(42)
|
||||
wpsGetProgress()
|
||||
wpsGetProgressPercentage()
|
||||
|
||||
wpsIncreaseProgress()
|
||||
wpsIncreaseProgress()
|
||||
wpsIncreaseProgress()
|
||||
wpsIncreaseProgress()
|
||||
wpsIncreaseProgress()
|
||||
wpsIncreaseProgress()
|
||||
wpsGetProgressPercentage()
|
||||
|
||||
wpsIncreaseProgress(17)
|
||||
wpsIncreaseProgress(17)
|
||||
wpsGetProgressPercentage()
|
||||
|
||||
wpsResetProgress()
|
||||
# wps.on;
|
|
@ -0,0 +1,4 @@
|
|||
gCube System - License
|
||||
------------------------------------------------------------
|
||||
|
||||
${gcube.license}
|
|
@ -0,0 +1,66 @@
|
|||
The gCube System - WPS
|
||||
--------------------------------------------------
|
||||
|
||||
${description}
|
||||
|
||||
|
||||
${gcube.description}
|
||||
|
||||
${gcube.funding}
|
||||
|
||||
|
||||
Version
|
||||
--------------------------------------------------
|
||||
|
||||
1.0.0-SNAPSHOT (${buildDate})
|
||||
|
||||
Please see the file named "changelog.xml" in this directory for the release notes.
|
||||
|
||||
|
||||
Authors
|
||||
--------------------------------------------------
|
||||
|
||||
* Lucio Lelii (lucio.lelii@isti.cnr.it), CNR, Italy
|
||||
|
||||
|
||||
Maintainers
|
||||
-----------
|
||||
|
||||
* Lucio Lelii (lucio.lelii@isti.cnr.it), CNR, Italy
|
||||
|
||||
|
||||
Download information
|
||||
--------------------------------------------------
|
||||
|
||||
Source code is available from SVN:
|
||||
${scm.url}
|
||||
|
||||
Binaries can be downloaded from the gCube website:
|
||||
${gcube.website}
|
||||
|
||||
|
||||
Installation
|
||||
--------------------------------------------------
|
||||
|
||||
Installation documentation is available on-line in the gCube Wiki:
|
||||
${gcube.wikiRoot}/RConnector
|
||||
|
||||
|
||||
Documentation
|
||||
--------------------------------------------------
|
||||
|
||||
Documentation is available on-line in the gCube Wiki:
|
||||
${gcube.wikiRoot}/RConnector
|
||||
|
||||
|
||||
Support
|
||||
--------------------------------------------------
|
||||
|
||||
Bugs and support requests can be reported in the gCube issue tracking tool:
|
||||
${gcube.issueTracking}
|
||||
|
||||
|
||||
Licensing
|
||||
--------------------------------------------------
|
||||
|
||||
This software is licensed under the terms you may find in the file named "LICENSE" in this directory.
|
|
@ -0,0 +1,26 @@
|
|||
<ReleaseNotes>
|
||||
<Changeset component="org.gcube.data-analysis.r-connector.2.1.2" date="2017-03-27">
|
||||
<Change>the add-user script is now executed even when configfile.csv is not present</Change>
|
||||
</Changeset>
|
||||
<Changeset component="org.gcube.data-analysis.r-connector.2.1.2" date="2017-02-22">
|
||||
<Change>porting to tabular-data-cl 2.0.0</Change>
|
||||
</Changeset>
|
||||
<Changeset component="org.gcube.data-analysis.r-connector.2.1.0" date="2016-10-03">
|
||||
<Change>porting to smartgears 2.0</Change>
|
||||
</Changeset>
|
||||
<Changeset component="org.gcube.data-analysis.r-connector.2.0.1" date="2016-05-13">
|
||||
<Change>algorithm for cookie encryption modified to support RStudio server 0.99</Change>
|
||||
</Changeset>
|
||||
<Changeset component="org.gcube.data-analysis.r-connector.2.0.0" date="2016-03-15">
|
||||
<Change>connect method with empty parameters added</Change>
|
||||
<Change>Authorization Framework integration</Change>
|
||||
</Changeset>
|
||||
<Changeset component="org.gcube.data-analysis.r-connector.1-0-1"
|
||||
date="2015-5-20">
|
||||
<Change>Bug fixing</Change>
|
||||
</Changeset>
|
||||
<Changeset component="org.gcube.data-analysis.r-connector.1-0-0"
|
||||
date="2014-11-02">
|
||||
<Change>First Release</Change>
|
||||
</Changeset>
|
||||
</ReleaseNotes>
|
|
@ -0,0 +1,32 @@
|
|||
<assembly
|
||||
xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
|
||||
<id>servicearchive</id>
|
||||
<formats>
|
||||
<format>tar.gz</format>
|
||||
</formats>
|
||||
<baseDirectory>/</baseDirectory>
|
||||
<fileSets>
|
||||
<fileSet>
|
||||
<directory>/home/lucio/workspace/imarine/wps/distro</directory>
|
||||
<outputDirectory>/</outputDirectory>
|
||||
<useDefaultExcludes>true</useDefaultExcludes>
|
||||
<includes>
|
||||
<include>README</include>
|
||||
<include>LICENSE</include>
|
||||
<include>changelog.xml</include>
|
||||
<include>profile.xml</include>
|
||||
</includes>
|
||||
<fileMode>755</fileMode>
|
||||
<filtered>true</filtered>
|
||||
</fileSet>
|
||||
</fileSets>
|
||||
<files>
|
||||
<file>
|
||||
<source>target/wps.war</source>
|
||||
<outputDirectory>/wps</outputDirectory>
|
||||
</file>
|
||||
|
||||
</files>
|
||||
</assembly>
|
|
@ -0,0 +1,6 @@
|
|||
<application mode="online">
|
||||
<name>DataMiner</name>
|
||||
<group>WPS</group>
|
||||
<version>1.0</version>
|
||||
<description>A service implementing a WPS provider in the D4Science e-Infrastructure</description>
|
||||
</application>
|
|
@ -0,0 +1,26 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<ID />
|
||||
<Type>Service</Type>
|
||||
<Profile>
|
||||
<Description>${description}</Description>
|
||||
<Class>DataAnalysis</Class>
|
||||
<Name>wps</Name>
|
||||
<Version>1.0.0</Version>
|
||||
<Packages>
|
||||
<Software>
|
||||
<Name>wps</Name>
|
||||
<Version>1.0.0-SNAPSHOT</Version>
|
||||
<MavenCoordinates>
|
||||
<groupId>org.gcube.data-analysis</groupId>
|
||||
<artifactId>wps</artifactId>
|
||||
<version>1.0.0-SNAPSHOT</version>
|
||||
</MavenCoordinates>
|
||||
<Files>
|
||||
<File>wps.jar</File>
|
||||
</Files>
|
||||
</Software>
|
||||
</Packages>
|
||||
</Profile>
|
||||
</Resource>
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<taglib version="2.1" xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-jsptaglibrary_2_1.xsd">
|
||||
<tlib-version>2.0</tlib-version>
|
||||
<short-name>wps</short-name>
|
||||
<uri>http://52north.org/communities/geoprocessing/wps/tags</uri>
|
||||
<function>
|
||||
<name>classExists</name>
|
||||
<function-class>org.n52.wps.webadmin.JSTLFunctions</function-class>
|
||||
<function-signature>boolean classExists(java.lang.String)</function-signature>
|
||||
</function>
|
||||
<function>
|
||||
<name>hasR</name>
|
||||
<function-class>org.n52.wps.webadmin.JSTLFunctions</function-class>
|
||||
<function-signature>boolean hasR()</function-signature>
|
||||
</function>
|
||||
</taglib>
|
|
@ -0,0 +1,157 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<web-app xmlns="http://java.sun.com/xml/ns/j2ee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://java.sun.com/xml/ns/j2ee http://java.sun.com/xml/ns/j2ee/web-app_2_4.xsd"
|
||||
version="2.4">
|
||||
<display-name>52°North Web Processing Service, Git: 1665e1b7b2188755161d4f0f3a6acf562d0444e1 @ 2015-03-21 00:30:20</display-name>
|
||||
<description>A web processing framework supporting the OGC WPS 1.0.0 specification</description>
|
||||
|
||||
<!-- security-constraint>
|
||||
<web-resource-collection>
|
||||
<web-resource-name>My JSP</web-resource-name>
|
||||
<url-pattern>/webAdmin/index.jsp</url-pattern>
|
||||
<http-method>GET</http-method>
|
||||
<http-method>POST</http-method>
|
||||
</web-resource-collection>
|
||||
<auth-constraint>
|
||||
<role-name>wpsAdmin</role-name>
|
||||
</auth-constraint>
|
||||
<user-data-constraint>
|
||||
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
|
||||
</user-data-constraint-->
|
||||
<!-- /security-constraint>
|
||||
|
||||
<login-config>
|
||||
<auth-method>BASIC</auth-method>
|
||||
<realm-name>Basic Authentication</realm-name>
|
||||
</login-config>
|
||||
|
||||
<Security roles referenced by this web application -->
|
||||
<!-- >security-role>
|
||||
<description>The role that is required to log in to the Manager Application</description>
|
||||
<role-name>manager</role-name>
|
||||
</security-role-->
|
||||
|
||||
<!--filter>
|
||||
<filter-name>CommunicationSizeLogFilter</filter-name>
|
||||
<filter-class>org.n52.wps.server.CommunicationSizeLogFilter</filter-class>
|
||||
</filter-->
|
||||
<!--filter>
|
||||
<filter-name>CompressingFilter</filter-name>
|
||||
<filter-class>com.planetj.servlet.filter.compression.CompressingFilter</filter-class>
|
||||
<init-param>
|
||||
<param-name>debug</param-name>
|
||||
<param-value>false</param-value>
|
||||
</init-param>
|
||||
<init-param>
|
||||
<param-name>statsEnabled</param-name>
|
||||
<param-value>true</param-value>
|
||||
</init-param>
|
||||
</filter>
|
||||
|
||||
<filter-mapping>
|
||||
<filter-name>CompressingFilter</filter-name>
|
||||
<url-pattern>/WebProcessingService</url-pattern>
|
||||
</filter-mapping-->
|
||||
<!-->filter-mapping>
|
||||
<filter-name>CommunicationSizeLogFilter</filter-name>
|
||||
<url-pattern>/WebProcessingService</url-pattern>
|
||||
</filter-mapping-->
|
||||
|
||||
<!-- <filter>
|
||||
<filter-name>ResponseURLFilter</filter-name>
|
||||
<filter-class>org.n52.wps.server.ResponseURLFilter</filter-class>
|
||||
</filter>
|
||||
<filter-mapping>
|
||||
<filter-name>ResponseURLFilter</filter-name>
|
||||
<url-pattern>*</url-pattern>
|
||||
</filter-mapping>-->
|
||||
|
||||
<servlet>
|
||||
<servlet-name>wpsServlet</servlet-name>
|
||||
<servlet-class>org.gcube.dataanalysis.wps.statisticalmanager.synchserver.web.WebProcessingService</servlet-class>
|
||||
<!--<servlet-class>org.n52.wps.server.WebProcessingService</servlet-class>-->
|
||||
<load-on-startup>0</load-on-startup>
|
||||
<init-param>
|
||||
<param-name>wps.config.file</param-name>
|
||||
<param-value>config/wps_config.xml</param-value>
|
||||
</init-param>
|
||||
</servlet>
|
||||
<servlet>
|
||||
<servlet-name>retrieveResultServlet</servlet-name>
|
||||
<servlet-class>org.n52.wps.server.RetrieveResultServlet</servlet-class>
|
||||
<load-on-startup>1</load-on-startup>
|
||||
</servlet>
|
||||
<servlet-mapping>
|
||||
<servlet-name>wpsServlet</servlet-name>
|
||||
<url-pattern>/WebProcessingService</url-pattern>
|
||||
</servlet-mapping>
|
||||
<servlet-mapping>
|
||||
<servlet-name>retrieveResultServlet</servlet-name>
|
||||
<url-pattern>/RetrieveResultServlet</url-pattern>
|
||||
</servlet-mapping>
|
||||
<welcome-file-list>
|
||||
<welcome-file>/index.html</welcome-file>
|
||||
</welcome-file-list>
|
||||
|
||||
<!-- 52n Security -->
|
||||
<context-param>
|
||||
<param-name>security.config.validation</param-name>
|
||||
<param-value>false</param-value>
|
||||
<!--description>
|
||||
disables validation of the security-config.xml this is necessary
|
||||
because the MavenProject: org.n52.wps:52n-wps-webapp:3.3.0-SNAPSHOT @ D:\dev\GitHub4w\WPS\52n-wps-webapp\pom.xml mechanism works only if the validation is disabled.
|
||||
</description-->
|
||||
</context-param>
|
||||
|
||||
<filter>
|
||||
<filter-name>CORS</filter-name>
|
||||
<filter-class>com.thetransactioncompany.cors.CORSFilter</filter-class>
|
||||
<init-param>
|
||||
<param-name>cors.allowOrigin</param-name>
|
||||
<param-value>*</param-value>
|
||||
</init-param>
|
||||
<init-param>
|
||||
<param-name>cors.allowGenericHttpRequests</param-name>
|
||||
<param-value>true</param-value>
|
||||
</init-param>
|
||||
<init-param>
|
||||
<param-name>cors.supportedMethods</param-name>
|
||||
<param-value>GET, POST, HEAD, PUT, DELETE, OPTIONS</param-value>
|
||||
</init-param>
|
||||
<init-param>
|
||||
<param-name>cors.supportedHeaders</param-name>
|
||||
<param-value>*</param-value>
|
||||
</init-param>
|
||||
<init-param>
|
||||
<param-name>cors.exposedHeaders</param-name>
|
||||
<param-value>*</param-value>
|
||||
</init-param>
|
||||
</filter>
|
||||
<filter-mapping>
|
||||
<filter-name>CORS</filter-name>
|
||||
<url-pattern>/*</url-pattern>
|
||||
</filter-mapping>
|
||||
|
||||
<!--
|
||||
<filter>
|
||||
<filter-name>authn</filter-name> -->
|
||||
<!-- display-name>Authentication Chain Filter</display-name-->
|
||||
<!-- description>
|
||||
Delegates calls to AuthenticationChainFilter that is defined in the security-config.
|
||||
</description-->
|
||||
<!-- <filter-class>org.n52.security.service.config.support.SecurityConfigDelegatingServletFilter</filter-class>
|
||||
</filter>
|
||||
|
||||
<filter-mapping>
|
||||
<filter-name>authn</filter-name>
|
||||
<url-pattern>/webAdmin/*</url-pattern>
|
||||
</filter-mapping>
|
||||
|
||||
|
||||
<listener>
|
||||
<listener-class>org.n52.security.service.config.support.SecurityConfigContextListener</listener-class>
|
||||
</listener>
|
||||
|
||||
-->
|
||||
|
||||
</web-app>
|
|
@ -0,0 +1,23 @@
|
|||
Copyright (c) 2007-2009 Marijn Haverbeke
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any
|
||||
damages arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any
|
||||
purpose, including commercial applications, and to alter it and
|
||||
redistribute it freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must
|
||||
not claim that you wrote the original software. If you use this
|
||||
software in a product, an acknowledgment in the product
|
||||
documentation would be appreciated but is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such, and must
|
||||
not be misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any source
|
||||
distribution.
|
||||
|
||||
Marijn Haverbeke
|
||||
marijnh at gmail
|
|
@ -0,0 +1,402 @@
|
|||
/* CodeMirror main module
|
||||
*
|
||||
* Implements the CodeMirror constructor and prototype, which take care
|
||||
* of initializing the editor frame, and providing the outside interface.
|
||||
*/
|
||||
|
||||
// The CodeMirrorConfig object is used to specify a default
|
||||
// configuration. If you specify such an object before loading this
|
||||
// file, the values you put into it will override the defaults given
|
||||
// below. You can also assign to it after loading.
|
||||
var CodeMirrorConfig = window.CodeMirrorConfig || {};
|
||||
|
||||
var CodeMirror = (function(){
|
||||
function setDefaults(object, defaults) {
|
||||
for (var option in defaults) {
|
||||
if (!object.hasOwnProperty(option))
|
||||
object[option] = defaults[option];
|
||||
}
|
||||
}
|
||||
function forEach(array, action) {
|
||||
for (var i = 0; i < array.length; i++)
|
||||
action(array[i]);
|
||||
}
|
||||
|
||||
// These default options can be overridden by passing a set of
|
||||
// options to a specific CodeMirror constructor. See manual.html for
|
||||
// their meaning.
|
||||
setDefaults(CodeMirrorConfig, {
|
||||
stylesheet: "",
|
||||
path: "",
|
||||
parserfile: [],
|
||||
basefiles: ["util.js", "stringstream.js", "select.js", "undo.js", "editor.js", "tokenize.js"],
|
||||
iframeClass: null,
|
||||
passDelay: 200,
|
||||
passTime: 50,
|
||||
lineNumberDelay: 200,
|
||||
lineNumberTime: 50,
|
||||
continuousScanning: false,
|
||||
saveFunction: null,
|
||||
onChange: null,
|
||||
undoDepth: 50,
|
||||
undoDelay: 800,
|
||||
disableSpellcheck: true,
|
||||
textWrapping: true,
|
||||
readOnly: false,
|
||||
width: "",
|
||||
height: "300px",
|
||||
autoMatchParens: false,
|
||||
parserConfig: null,
|
||||
tabMode: "indent", // or "spaces", "default", "shift"
|
||||
reindentOnLoad: false,
|
||||
activeTokens: null,
|
||||
cursorActivity: null,
|
||||
lineNumbers: false,
|
||||
indentUnit: 2
|
||||
});
|
||||
|
||||
function addLineNumberDiv(container) {
|
||||
var nums = document.createElement("DIV"),
|
||||
scroller = document.createElement("DIV");
|
||||
nums.style.position = "absolute";
|
||||
nums.style.height = "100%";
|
||||
if (nums.style.setExpression) {
|
||||
try {nums.style.setExpression("height", "this.previousSibling.offsetHeight + 'px'");}
|
||||
catch(e) {} // Seems to throw 'Not Implemented' on some IE8 versions
|
||||
}
|
||||
nums.style.top = "0px";
|
||||
nums.style.overflow = "hidden";
|
||||
container.appendChild(nums);
|
||||
scroller.className = "CodeMirror-line-numbers";
|
||||
nums.appendChild(scroller);
|
||||
return nums;
|
||||
}
|
||||
|
||||
function CodeMirror(place, options) {
|
||||
// Backward compatibility for deprecated options.
|
||||
if (options.dumbTabs) options.tabMode = "spaces";
|
||||
else if (options.normalTab) options.tabMode = "default";
|
||||
|
||||
// Use passed options, if any, to override defaults.
|
||||
this.options = options = options || {};
|
||||
setDefaults(options, CodeMirrorConfig);
|
||||
|
||||
var frame = this.frame = document.createElement("IFRAME");
|
||||
if (options.iframeClass) frame.className = options.iframeClass;
|
||||
frame.frameBorder = 0;
|
||||
frame.src = "javascript:false;";
|
||||
frame.style.border = "0";
|
||||
frame.style.width = '100%';
|
||||
frame.style.height = '100%';
|
||||
// display: block occasionally suppresses some Firefox bugs, so we
|
||||
// always add it, redundant as it sounds.
|
||||
frame.style.display = "block";
|
||||
|
||||
var div = this.wrapping = document.createElement("DIV");
|
||||
div.style.position = "relative";
|
||||
div.className = "CodeMirror-wrapping";
|
||||
div.style.width = options.width;
|
||||
div.style.height = options.height;
|
||||
|
||||
if (place.appendChild) place.appendChild(div);
|
||||
else place(div);
|
||||
div.appendChild(frame);
|
||||
if (options.lineNumbers) this.lineNumbers = addLineNumberDiv(div);
|
||||
|
||||
// Link back to this object, so that the editor can fetch options
|
||||
// and add a reference to itself.
|
||||
frame.CodeMirror = this;
|
||||
this.win = frame.contentWindow;
|
||||
|
||||
if (typeof options.parserfile == "string")
|
||||
options.parserfile = [options.parserfile];
|
||||
if (typeof options.stylesheet == "string")
|
||||
options.stylesheet = [options.stylesheet];
|
||||
|
||||
var html = ["<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\" \"http://www.w3.org/TR/html4/loose.dtd\"><html><head>"];
|
||||
// Hack to work around a bunch of IE8-specific problems.
|
||||
html.push("<meta http-equiv=\"X-UA-Compatible\" content=\"IE=EmulateIE7\"/>");
|
||||
forEach(options.stylesheet, function(file) {
|
||||
html.push("<link rel=\"stylesheet\" type=\"text/css\" href=\"" + file + "\"/>");
|
||||
});
|
||||
forEach(options.basefiles.concat(options.parserfile), function(file) {
|
||||
html.push("<script type=\"text/javascript\" src=\"" + options.path + file + "\"><" + "/script>");
|
||||
});
|
||||
html.push("</head><body style=\"border-width: 0;\" class=\"editbox\" spellcheck=\"" +
|
||||
(options.disableSpellcheck ? "false" : "true") + "\"></body></html>");
|
||||
|
||||
var doc = this.win.document;
|
||||
doc.open();
|
||||
doc.write(html.join(""));
|
||||
doc.close();
|
||||
}
|
||||
|
||||
CodeMirror.prototype = {
|
||||
init: function() {
|
||||
if (this.options.initCallback) this.options.initCallback(this);
|
||||
if (this.options.lineNumbers) this.activateLineNumbers();
|
||||
if (this.options.reindentOnLoad) this.reindent();
|
||||
},
|
||||
|
||||
getCode: function() {return this.editor.getCode();},
|
||||
setCode: function(code) {this.editor.importCode(code);},
|
||||
selection: function() {this.focusIfIE(); return this.editor.selectedText();},
|
||||
reindent: function() {this.editor.reindent();},
|
||||
reindentSelection: function() {this.focusIfIE(); this.editor.reindentSelection(null);},
|
||||
|
||||
focusIfIE: function() {
|
||||
// in IE, a lot of selection-related functionality only works when the frame is focused
|
||||
if (this.win.select.ie_selection) this.focus();
|
||||
},
|
||||
focus: function() {
|
||||
this.win.focus();
|
||||
if (this.editor.selectionSnapshot) // IE hack
|
||||
this.win.select.setBookmark(this.win.document.body, this.editor.selectionSnapshot);
|
||||
},
|
||||
replaceSelection: function(text) {
|
||||
this.focus();
|
||||
this.editor.replaceSelection(text);
|
||||
return true;
|
||||
},
|
||||
replaceChars: function(text, start, end) {
|
||||
this.editor.replaceChars(text, start, end);
|
||||
},
|
||||
getSearchCursor: function(string, fromCursor, caseFold) {
|
||||
return this.editor.getSearchCursor(string, fromCursor, caseFold);
|
||||
},
|
||||
|
||||
undo: function() {this.editor.history.undo();},
|
||||
redo: function() {this.editor.history.redo();},
|
||||
historySize: function() {return this.editor.history.historySize();},
|
||||
clearHistory: function() {this.editor.history.clear();},
|
||||
|
||||
grabKeys: function(callback, filter) {this.editor.grabKeys(callback, filter);},
|
||||
ungrabKeys: function() {this.editor.ungrabKeys();},
|
||||
|
||||
setParser: function(name) {this.editor.setParser(name);},
|
||||
  setSpellcheck: function(on) {this.win.document.body.spellcheck = on;},
  setTextWrapping: function(on) {
    if (on == this.options.textWrapping) return;
    this.win.document.body.style.whiteSpace = on ? "" : "nowrap";
    this.options.textWrapping = on;
    if (this.lineNumbers) {
      this.setLineNumbers(false);
      this.setLineNumbers(true);
    }
  },
  setIndentUnit: function(unit) {this.win.indentUnit = unit;},
  setUndoDepth: function(depth) {this.editor.history.maxDepth = depth;},
  setTabMode: function(mode) {this.options.tabMode = mode;},
  setLineNumbers: function(on) {
    if (on && !this.lineNumbers) {
      this.lineNumbers = addLineNumberDiv(this.wrapping);
      this.activateLineNumbers();
    }
    else if (!on && this.lineNumbers) {
      this.wrapping.removeChild(this.lineNumbers);
      this.wrapping.style.marginLeft = "";
      this.lineNumbers = null;
    }
  },

  cursorPosition: function(start) {this.focusIfIE(); return this.editor.cursorPosition(start);},
  firstLine: function() {return this.editor.firstLine();},
  lastLine: function() {return this.editor.lastLine();},
  nextLine: function(line) {return this.editor.nextLine(line);},
  prevLine: function(line) {return this.editor.prevLine(line);},
  lineContent: function(line) {return this.editor.lineContent(line);},
  setLineContent: function(line, content) {this.editor.setLineContent(line, content);},
  removeLine: function(line){this.editor.removeLine(line);},
  insertIntoLine: function(line, position, content) {this.editor.insertIntoLine(line, position, content);},
  selectLines: function(startLine, startOffset, endLine, endOffset) {
    this.win.focus();
    this.editor.selectLines(startLine, startOffset, endLine, endOffset);
  },
  nthLine: function(n) {
    var line = this.firstLine();
    for (; n > 1 && line !== false; n--)
      line = this.nextLine(line);
    return line;
  },
  lineNumber: function(line) {
    var num = 0;
    while (line !== false) {
      num++;
      line = this.prevLine(line);
    }
    return num;
  },

  // Old number-based line interface
  jumpToLine: function(n) {
    this.selectLines(this.nthLine(n), 0);
    this.win.focus();
  },
  currentLine: function() {
    return this.lineNumber(this.cursorPosition().line);
  },

  activateLineNumbers: function() {
    var frame = this.frame, win = frame.contentWindow, doc = win.document, body = doc.body,
        nums = this.lineNumbers, scroller = nums.firstChild, self = this;
    var barWidth = null;

    function sizeBar() {
      if (frame.offsetWidth == 0) return;
      for (var root = frame; root.parentNode; root = root.parentNode);
      if (!nums.parentNode || root != document || !win.Editor) {
        // Clear event handlers (their nodes might already be collected, so try/catch)
        try{clear();}catch(e){}
        clearInterval(sizeInterval);
        return;
      }

      if (nums.offsetWidth != barWidth) {
        barWidth = nums.offsetWidth;
        nums.style.left = "-" + (frame.parentNode.style.marginLeft = barWidth + "px");
      }
    }
    function doScroll() {
      nums.scrollTop = body.scrollTop || doc.documentElement.scrollTop || 0;
    }
    // Cleanup function, registered by nonWrapping and wrapping.
    var clear = function(){};
    sizeBar();
    var sizeInterval = setInterval(sizeBar, 500);

    function nonWrapping() {
      var nextNum = 1;
      function update() {
        var target = 50 + Math.max(body.offsetHeight, frame.offsetHeight);
        while (scroller.offsetHeight < target) {
          scroller.appendChild(document.createElement("DIV"));
          scroller.lastChild.innerHTML = nextNum++;
        }
        doScroll();
      }
      var onScroll = win.addEventHandler(win, "scroll", update, true),
          onResize = win.addEventHandler(win, "resize", update, true);
      clear = function(){onScroll(); onResize();};
    }
    function wrapping() {
      var node, lineNum, next, pos;

      function addNum(n) {
        if (!lineNum) lineNum = scroller.appendChild(document.createElement("DIV"));
        lineNum.innerHTML = n;
        pos = lineNum.offsetHeight + lineNum.offsetTop;
        lineNum = lineNum.nextSibling;
      }
      function work() {
        if (!scroller.parentNode || scroller.parentNode != self.lineNumbers) return;

        var endTime = new Date().getTime() + self.options.lineNumberTime;
        while (node) {
          addNum(next++);
          for (; node && !win.isBR(node); node = node.nextSibling) {
            var bott = node.offsetTop + node.offsetHeight;
            while (bott - 3 > pos) addNum("&nbsp;");
          }
          if (node) node = node.nextSibling;
          if (new Date().getTime() > endTime) {
            pending = setTimeout(work, self.options.lineNumberDelay);
            return;
          }
        }
        // While there are un-processed number DIVs, or the scroller is smaller than the frame...
        var target = 50 + Math.max(body.offsetHeight, frame.offsetHeight);
        while (lineNum || scroller.offsetHeight < target) addNum(next++);
        doScroll();
      }
      function start() {
        doScroll();
        node = body.firstChild;
        lineNum = scroller.firstChild;
        pos = 0;
        next = 1;
        work();
      }

      start();
      var pending = null;
      function update() {
        if (pending) clearTimeout(pending);
        if (self.editor.allClean()) start();
        else pending = setTimeout(update, 200);
      }
      self.updateNumbers = update;
      var onScroll = win.addEventHandler(win, "scroll", doScroll, true),
          onResize = win.addEventHandler(win, "resize", update, true);
      clear = function(){
        if (pending) clearTimeout(pending);
        if (self.updateNumbers == update) self.updateNumbers = null;
        onScroll();
        onResize();
      };
    }
    (this.options.textWrapping ? wrapping : nonWrapping)();
  }
};

CodeMirror.InvalidLineHandle = {toString: function(){return "CodeMirror.InvalidLineHandle";}};

CodeMirror.replace = function(element) {
  if (typeof element == "string")
    element = document.getElementById(element);
  return function(newElement) {
    element.parentNode.replaceChild(newElement, element);
  };
};

CodeMirror.fromTextArea = function(area, options) {
  if (typeof area == "string")
    area = document.getElementById(area);

  options = options || {};
  if (area.style.width && options.width == null)
    options.width = area.style.width;
  if (area.style.height && options.height == null)
    options.height = area.style.height;
  if (options.content == null) options.content = area.value;

  if (area.form) {
    function updateField() {
      area.value = mirror.getCode();
    }
    if (typeof area.form.addEventListener == "function")
      area.form.addEventListener("submit", updateField, false);
    else
      area.form.attachEvent("onsubmit", updateField);
  }

  function insert(frame) {
    if (area.nextSibling)
      area.parentNode.insertBefore(frame, area.nextSibling);
    else
      area.parentNode.appendChild(frame);
  }

  area.style.display = "none";
  var mirror = new CodeMirror(insert, options);
  return mirror;
};

CodeMirror.isProbablySupported = function() {
  // This is rather awful, but can be useful.
  var match;
  if (window.opera)
    return Number(window.opera.version()) >= 9.52;
  else if (/Apple Computers, Inc/.test(navigator.vendor) && (match = navigator.userAgent.match(/Version\/(\d+(?:\.\d+)?)\./)))
    return Number(match[1]) >= 3;
  else if (document.selection && window.ActiveXObject && (match = navigator.userAgent.match(/MSIE (\d+(?:\.\d*)?)\b/)))
    return Number(match[1]) >= 6;
  else if (match = navigator.userAgent.match(/gecko\/(\d{8})/i))
    return Number(match[1]) >= 20050901;
  else if (match = navigator.userAgent.match(/AppleWebKit\/(\d+)/))
    return Number(match[1]) >= 525;
  else
    return null;
};

return CodeMirror;
})();
File diff suppressed because it is too large
@ -0,0 +1,68 @@
// Minimal framing needed to use CodeMirror-style parsers to highlight
// code. Load this along with tokenize.js, stringstream.js, and your
// parser. Then call highlightText, passing a string as the first
// argument, and as the second argument either a callback function
// that will be called with an array of SPAN nodes for every line in
// the code, or a DOM node to which to append these spans, and
// optionally (not needed if you only loaded one parser) a parser
// object.

// Stuff from util.js that the parsers are using.
var StopIteration = {toString: function() {return "StopIteration"}};

var Editor = {};
var indentUnit = 2;

(function(){
  function normaliseString(string) {
    var tab = "";
    for (var i = 0; i < indentUnit; i++) tab += " ";

    string = string.replace(/\t/g, tab).replace(/\u00a0/g, " ").replace(/\r\n?/g, "\n");
    var pos = 0, parts = [], lines = string.split("\n");
    for (var line = 0; line < lines.length; line++) {
      if (line != 0) parts.push("\n");
      parts.push(lines[line]);
    }

    return {
      next: function() {
        if (pos < parts.length) return parts[pos++];
        else throw StopIteration;
      }
    };
  }

  window.highlightText = function(string, callback, parser) {
    var parser = (parser || Editor.Parser).make(stringStream(normaliseString(string)));
    var line = [];
    if (callback.nodeType == 1) {
      var node = callback;
      callback = function(line) {
        for (var i = 0; i < line.length; i++)
          node.appendChild(line[i]);
        node.appendChild(document.createElement("BR"));
      };
    }

    try {
      while (true) {
        var token = parser.next();
        if (token.value == "\n") {
          callback(line);
          line = [];
        }
        else {
          var span = document.createElement("SPAN");
          span.className = token.style;
          span.appendChild(document.createTextNode(token.value));
          line.push(span);
        }
      }
    }
    catch (e) {
      if (e != StopIteration) throw e;
    }
    if (line.length) callback(line);
  }
})();
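
// Minimal usage sketch (an assumption, not part of the original file):
// with stringstream.js, tokenize.js, and a parser such as
// parsejavascript.js loaded alongside this file, a string can be
// highlighted into any container element. The "output" id is
// hypothetical.
//
//   highlightText("var answer = 42;", document.getElementById("output"), JSParser);
//
// When only one parser script is loaded, the parser argument can be
// omitted, since Editor.Parser then refers to that parser.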
@ -0,0 +1,81 @@
/* Demonstration of embedding CodeMirror in a bigger application. The
 * interface defined here is a mess of prompts and confirms, and
 * should probably not be used in a real project.
 */

function MirrorFrame(place, options) {
  this.home = document.createElement("DIV");
  if (place.appendChild)
    place.appendChild(this.home);
  else
    place(this.home);

  var self = this;
  function makeButton(name, action) {
    var button = document.createElement("INPUT");
    button.type = "button";
    button.value = name;
    self.home.appendChild(button);
    button.onclick = function(){self[action].call(self);};
  }

  makeButton("Search", "search");
  makeButton("Replace", "replace");
  makeButton("Current line", "line");
  makeButton("Jump to line", "jump");
  makeButton("Insert constructor", "macro");
  makeButton("Indent all", "reindent");

  this.mirror = new CodeMirror(this.home, options);
}

MirrorFrame.prototype = {
  search: function() {
    var text = prompt("Enter search term:", "");
    if (!text) return;

    var first = true;
    do {
      var cursor = this.mirror.getSearchCursor(text, first, true);
      first = false;
      while (cursor.findNext()) {
        cursor.select();
        if (!confirm("Search again?"))
          return;
      }
    } while (confirm("End of document reached. Start over?"));
  },

  replace: function() {
    // This is a replace-all, but it is possible to implement a
    // prompting replace.
    var from = prompt("Enter search string:", ""), to;
    if (from) to = prompt("What should it be replaced with?", "");
    if (to == null) return;

    var cursor = this.mirror.getSearchCursor(from, false);
    while (cursor.findNext())
      cursor.replace(to);
  },

  jump: function() {
    var line = prompt("Jump to line:", "");
    if (line && !isNaN(Number(line)))
      this.mirror.jumpToLine(Number(line));
  },

  line: function() {
    alert("The cursor is currently at line " + this.mirror.currentLine());
    this.mirror.focus();
  },

  macro: function() {
    var name = prompt("Name your constructor:", "");
    if (name)
      this.mirror.replaceSelection("function " + name + "() {\n  \n}\n\n" + name + ".prototype = {\n  \n};\n");
  },

  reindent: function() {
    this.mirror.reindent();
  }
};
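
// Minimal usage sketch, assuming codemirror.js is loaded and the page
// contains a placeholder node; the element id and option values are
// only examples.
//
//   var frame = new MirrorFrame(document.getElementById("frame"), {
//     parserfile: "parsejavascript.js",
//     stylesheet: "css/jscolors.css"
//   });
//   frame.mirror.focus();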
@ -0,0 +1,155 @@
/* Simple parser for CSS */

var CSSParser = Editor.Parser = (function() {
  var tokenizeCSS = (function() {
    function normal(source, setState) {
      var ch = source.next();
      if (ch == "@") {
        source.nextWhileMatches(/\w/);
        return "css-at";
      }
      else if (ch == "/" && source.equals("*")) {
        setState(inCComment);
        return null;
      }
      else if (ch == "<" && source.equals("!")) {
        setState(inSGMLComment);
        return null;
      }
      else if (ch == "=") {
        return "css-compare";
      }
      else if (source.equals("=") && (ch == "~" || ch == "|")) {
        source.next();
        return "css-compare";
      }
      else if (ch == "\"" || ch == "'") {
        setState(inString(ch));
        return null;
      }
      else if (ch == "#") {
        source.nextWhileMatches(/\w/);
        return "css-hash";
      }
      else if (ch == "!") {
        source.nextWhileMatches(/[ \t]/);
        source.nextWhileMatches(/\w/);
        return "css-important";
      }
      else if (/\d/.test(ch)) {
        source.nextWhileMatches(/[\w.%]/);
        return "css-unit";
      }
      else if (/[,.+>*\/]/.test(ch)) {
        return "css-select-op";
      }
      else if (/[;{}:\[\]]/.test(ch)) {
        return "css-punctuation";
      }
      else {
        source.nextWhileMatches(/[\w\\\-_]/);
        return "css-identifier";
      }
    }

    function inCComment(source, setState) {
      var maybeEnd = false;
      while (!source.endOfLine()) {
        var ch = source.next();
        if (maybeEnd && ch == "/") {
          setState(normal);
          break;
        }
        maybeEnd = (ch == "*");
      }
      return "css-comment";
    }

    function inSGMLComment(source, setState) {
      var dashes = 0;
      while (!source.endOfLine()) {
        var ch = source.next();
        if (dashes >= 2 && ch == ">") {
          setState(normal);
          break;
        }
        dashes = (ch == "-") ? dashes + 1 : 0;
      }
      return "css-comment";
    }

    function inString(quote) {
      return function(source, setState) {
        var escaped = false;
        while (!source.endOfLine()) {
          var ch = source.next();
          if (ch == quote && !escaped)
            break;
          escaped = !escaped && ch == "\\";
        }
        if (!escaped)
          setState(normal);
        return "css-string";
      };
    }

    return function(source, startState) {
      return tokenizer(source, startState || normal);
    };
  })();

  function indentCSS(inBraces, inRule, base) {
    return function(nextChars) {
      if (!inBraces || /^\}/.test(nextChars)) return base;
      else if (inRule) return base + indentUnit * 2;
      else return base + indentUnit;
    };
  }

  // This is a very simplistic parser -- since CSS does not really
  // nest, it works acceptably well, but some nicer colouring could
  // be provided with a more complicated parser.
  function parseCSS(source, basecolumn) {
    basecolumn = basecolumn || 0;
    var tokens = tokenizeCSS(source);
    var inBraces = false, inRule = false;

    var iter = {
      next: function() {
        var token = tokens.next(), style = token.style, content = token.content;

        if (style == "css-identifier" && inRule)
          token.style = "css-value";
        if (style == "css-hash")
          token.style = inRule ? "css-colorcode" : "css-identifier";

        if (content == "\n")
          token.indentation = indentCSS(inBraces, inRule, basecolumn);

        if (content == "{")
          inBraces = true;
        else if (content == "}")
          inBraces = inRule = false;
        else if (inBraces && content == ";")
          inRule = false;
        else if (inBraces && style != "css-comment" && style != "whitespace")
          inRule = true;

        return token;
      },

      copy: function() {
        var _inBraces = inBraces, _inRule = inRule, _tokenState = tokens.state;
        return function(source) {
          tokens = tokenizeCSS(source, _tokenState);
          inBraces = _inBraces;
          inRule = _inRule;
          return iter;
        };
      }
    };
    return iter;
  }

  return {make: parseCSS, electricChars: "}"};
})();
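
// Minimal usage sketch: with highlight.js (and its dependencies)
// loaded, the parser can be exercised directly; the "out" element id
// is hypothetical.
//
//   highlightText("body {color: red;}", document.getElementById("out"), CSSParser);
//
// Inside a rule body, identifiers are restyled as "css-value" and
// hash tokens as "css-colorcode", per the next() logic above.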
@ -0,0 +1,32 @@
var DummyParser = Editor.Parser = (function() {
  function tokenizeDummy(source) {
    while (!source.endOfLine()) source.next();
    return "text";
  }
  function parseDummy(source) {
    function indentTo(n) {return function() {return n;}}
    source = tokenizer(source, tokenizeDummy);
    var space = 0;

    var iter = {
      next: function() {
        var tok = source.next();
        if (tok.type == "whitespace") {
          if (tok.value == "\n") tok.indentation = indentTo(space);
          else space = tok.value.length;
        }
        return tok;
      },
      copy: function() {
        var _space = space;
        return function(_source) {
          space = _space;
          source = tokenizer(_source, tokenizeDummy);
          return iter;
        };
      }
    };
    return iter;
  }
  return {make: parseDummy};
})();
@ -0,0 +1,74 @@
var HTMLMixedParser = Editor.Parser = (function() {
  if (!(CSSParser && JSParser && XMLParser))
    throw new Error("CSS, JS, and XML parsers must be loaded for HTML mixed mode to work.");
  XMLParser.configure({useHTMLKludges: true});

  function parseMixed(stream) {
    var htmlParser = XMLParser.make(stream), localParser = null, inTag = false;
    var iter = {next: top, copy: copy};

    function top() {
      var token = htmlParser.next();
      if (token.content == "<")
        inTag = true;
      else if (token.style == "xml-tagname" && inTag === true)
        inTag = token.content.toLowerCase();
      else if (token.content == ">") {
        if (inTag == "script")
          iter.next = local(JSParser, "</script");
        else if (inTag == "style")
          iter.next = local(CSSParser, "</style");
        inTag = false;
      }
      return token;
    }
    function local(parser, tag) {
      var baseIndent = htmlParser.indentation();
      localParser = parser.make(stream, baseIndent + indentUnit);
      return function() {
        if (stream.lookAhead(tag, false, false, true)) {
          localParser = null;
          iter.next = top;
          return top();
        }

        var token = localParser.next();
        var lt = token.value.lastIndexOf("<"), sz = Math.min(token.value.length - lt, tag.length);
        if (lt != -1 && token.value.slice(lt, lt + sz).toLowerCase() == tag.slice(0, sz) &&
            stream.lookAhead(tag.slice(sz), false, false, true)) {
          stream.push(token.value.slice(lt));
          token.value = token.value.slice(0, lt);
        }

        if (token.indentation) {
          var oldIndent = token.indentation;
          token.indentation = function(chars) {
            if (chars == "</")
              return baseIndent;
            else
              return oldIndent(chars);
          }
        }

        return token;
      };
    }

    function copy() {
      var _html = htmlParser.copy(), _local = localParser && localParser.copy(),
          _next = iter.next, _inTag = inTag;
      return function(_stream) {
        stream = _stream;
        htmlParser = _html(_stream);
        localParser = _local && _local(_stream);
        iter.next = _next;
        inTag = _inTag;
        return iter;
      };
    }
    return iter;
  }

  return {make: parseMixed, electricChars: "{}/:"};

})();
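
// Minimal usage sketch: as the guard above requires, parsexml.js,
// parsecss.js, and parsejavascript.js (with its tokenizer) must be
// loaded before this file. It can then highlight HTML with embedded
// script and style sections; "out" is a hypothetical element id.
//
//   highlightText("<p>hi</p><script>var x = 1;</script>",
//                 document.getElementById("out"), HTMLMixedParser);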
@ -0,0 +1,350 @@
/* Parse function for JavaScript. Makes use of the tokenizer from
 * tokenizejavascript.js. Note that your parsers do not have to be
 * this complicated -- if you don't want to recognize local variables,
 * in many languages it is enough to just look for braces, semicolons,
 * parentheses, etc, and know when you are inside a string or comment.
 *
 * See manual.html for more info about the parser interface.
 */

var JSParser = Editor.Parser = (function() {
  // Token types that can be considered to be atoms.
  var atomicTypes = {"atom": true, "number": true, "variable": true, "string": true, "regexp": true};
  // Setting that can be used to have JSON data indent properly.
  var json = false;
  // Constructor for the lexical context objects.
  function JSLexical(indented, column, type, align, prev, info) {
    // indentation at start of this line
    this.indented = indented;
    // column at which this scope was opened
    this.column = column;
    // type of scope ('vardef', 'stat' (statement), 'form' (special form), '[', '{', or '(')
    this.type = type;
    // '[', '{', or '(' blocks that have any text after their opening
    // character are said to be 'aligned' -- any lines below are
    // indented all the way to the opening character.
    if (align != null)
      this.align = align;
    // Parent scope, if any.
    this.prev = prev;
    this.info = info;
  }

  // My favourite JavaScript indentation rules.
  function indentJS(lexical) {
    return function(firstChars) {
      var firstChar = firstChars && firstChars.charAt(0), type = lexical.type;
      var closing = firstChar == type;
      if (type == "vardef")
        return lexical.indented + 4;
      else if (type == "form" && firstChar == "{")
        return lexical.indented;
      else if (type == "stat" || type == "form")
        return lexical.indented + indentUnit;
      else if (lexical.info == "switch" && !closing)
        return lexical.indented + (/^(?:case|default)\b/.test(firstChars) ? indentUnit : 2 * indentUnit);
      else if (lexical.align)
        return lexical.column - (closing ? 1 : 0);
      else
        return lexical.indented + (closing ? 0 : indentUnit);
    };
  }

  // The parser-iterator-producing function itself.
  function parseJS(input, basecolumn) {
    // Wrap the input in a token stream
    var tokens = tokenizeJavaScript(input);
    // The parser state. cc is a stack of actions that have to be
    // performed to finish the current statement. For example we might
    // know that we still need to find a closing parenthesis and a
    // semicolon. Actions at the end of the stack go first. It is
    // initialized with an infinitely looping action that consumes
    // whole statements.
    var cc = [statements];
    // Context contains information about the current local scope, the
    // variables defined in that, and the scopes above it.
    var context = null;
    // The lexical scope, used mostly for indentation.
    var lexical = new JSLexical((basecolumn || 0) - indentUnit, 0, "block", false);
    // Current column, and the indentation at the start of the current
    // line. Used to create lexical scope objects.
    var column = 0;
    var indented = 0;
    // Variables which are used by the mark, cont, and pass functions
    // below to communicate with the driver loop in the 'next'
    // function.
    var consume, marked;

    // The iterator object.
    var parser = {next: next, copy: copy};

    function next(){
      // Start by performing any 'lexical' actions (adjusting the
      // lexical variable), or the operations below will be working
      // with the wrong lexical state.
      while(cc[cc.length - 1].lex)
        cc.pop()();

      // Fetch a token.
      var token = tokens.next();

      // Adjust column and indented.
      if (token.type == "whitespace" && column == 0)
        indented = token.value.length;
      column += token.value.length;
      if (token.content == "\n"){
        indented = column = 0;
        // If the lexical scope's align property is still undefined at
        // the end of the line, it is an un-aligned scope.
        if (!("align" in lexical))
          lexical.align = false;
        // Newline tokens get an indentation function associated with
        // them.
        token.indentation = indentJS(lexical);
      }
      // No more processing for meaningless tokens.
      if (token.type == "whitespace" || token.type == "comment")
        return token;
      // When a meaningful token is found and the lexical scope's
      // align is undefined, it is an aligned scope.
      if (!("align" in lexical))
        lexical.align = true;

      // Execute actions until one 'consumes' the token and we can
      // return it.
      while(true) {
        consume = marked = false;
        // Take and execute the topmost action.
        cc.pop()(token.type, token.content);
        if (consume){
          // Marked is used to change the style of the current token.
          if (marked)
            token.style = marked;
          // Here we differentiate between local and global variables.
          else if (token.type == "variable" && inScope(token.content))
            token.style = "js-localvariable";
          return token;
        }
      }
    }

    // This makes a copy of the parser state. It stores all the
    // stateful variables in a closure, and returns a function that
    // will restore them when called with a new input stream. Note
    // that the cc array has to be copied, because it is constantly
    // being modified. Lexical objects are not mutated, and context
    // objects are not mutated in a harmful way, so they can be shared
    // between runs of the parser.
    function copy(){
      var _context = context, _lexical = lexical, _cc = cc.concat([]), _tokenState = tokens.state;

      return function copyParser(input){
        context = _context;
        lexical = _lexical;
        cc = _cc.concat([]); // copies the array
        column = indented = 0;
        tokens = tokenizeJavaScript(input, _tokenState);
        return parser;
      };
    }

    // Helper function for pushing a number of actions onto the cc
    // stack in reverse order.
    function push(fs){
      for (var i = fs.length - 1; i >= 0; i--)
        cc.push(fs[i]);
    }
    // cont and pass are used by the action functions to add other
    // actions to the stack. cont will cause the current token to be
    // consumed, pass will leave it for the next action.
    function cont(){
      push(arguments);
      consume = true;
    }
    function pass(){
      push(arguments);
      consume = false;
    }
    // Used to change the style of the current token.
    function mark(style){
      marked = style;
    }

    // Push a new scope. Will automatically link the current scope.
    function pushcontext(){
      context = {prev: context, vars: {"this": true, "arguments": true}};
    }
    // Pop off the current scope.
    function popcontext(){
      context = context.prev;
    }
    // Register a variable in the current scope.
    function register(varname){
      if (context){
        mark("js-variabledef");
        context.vars[varname] = true;
      }
    }
    // Check whether a variable is defined in the current scope.
    function inScope(varname){
      var cursor = context;
      while (cursor) {
        if (cursor.vars[varname])
          return true;
        cursor = cursor.prev;
      }
      return false;
    }

    // Push a new lexical context of the given type.
    function pushlex(type, info) {
      var result = function(){
        lexical = new JSLexical(indented, column, type, null, lexical, info);
      };
      result.lex = true;
      return result;
    }
    // Pop off the current lexical context.
    function poplex(){
      lexical = lexical.prev;
    }
    poplex.lex = true;
    // The 'lex' flag on these actions is used by the 'next' function
    // to know they can (and have to) be ran before moving on to the
    // next token.

    // Creates an action that discards tokens until it finds one of
    // the given type.
    function expect(wanted){
      return function expecting(type){
        if (type == wanted) cont();
        else cont(arguments.callee);
      };
    }

    // Looks for a statement, and then calls itself.
    function statements(type){
      return pass(statement, statements);
    }
    // Dispatches various types of statements based on the type of the
    // current token.
    function statement(type){
      if (type == "var") cont(pushlex("vardef"), vardef1, expect(";"), poplex);
      else if (type == "keyword a") cont(pushlex("form"), expression, statement, poplex);
      else if (type == "keyword b") cont(pushlex("form"), statement, poplex);
      else if (type == "{" && json) cont(pushlex("}"), commasep(objprop, "}"), poplex);
      else if (type == "{") cont(pushlex("}"), block, poplex);
      else if (type == "function") cont(functiondef);
      else if (type == "for") cont(pushlex("form"), expect("("), pushlex(")"), forspec1, expect(")"), poplex, statement, poplex);
      else if (type == "variable") cont(pushlex("stat"), maybelabel);
      else if (type == "switch") cont(pushlex("form"), expression, pushlex("}", "switch"), expect("{"), block, poplex, poplex);
      else if (type == "case") cont(expression, expect(":"));
      else if (type == "default") cont(expect(":"));
      else if (type == "catch") cont(pushlex("form"), pushcontext, expect("("), funarg, expect(")"), statement, poplex, popcontext);
      else pass(pushlex("stat"), expression, expect(";"), poplex);
    }
    // Dispatch expression types.
    function expression(type){
      if (atomicTypes.hasOwnProperty(type)) cont(maybeoperator);
      else if (type == "function") cont(functiondef);
      else if (type == "keyword c") cont(expression);
      else if (type == "(") cont(pushlex(")"), expression, expect(")"), poplex, maybeoperator);
      else if (type == "operator") cont(expression);
      else if (type == "[") cont(pushlex("]"), commasep(expression, "]"), poplex, maybeoperator);
      else if (type == "{") cont(pushlex("}"), commasep(objprop, "}"), poplex, maybeoperator);
    }
    // Called for places where operators, function calls, or
    // subscripts are valid. Will skip on to the next action if none
    // is found.
    function maybeoperator(type){
      if (type == "operator") cont(expression);
      else if (type == "(") cont(pushlex(")"), expression, commasep(expression, ")"), poplex, maybeoperator);
      else if (type == ".") cont(property, maybeoperator);
      else if (type == "[") cont(pushlex("]"), expression, expect("]"), poplex, maybeoperator);
    }
    // When a statement starts with a variable name, it might be a
    // label. If no colon follows, it's a regular statement.
    function maybelabel(type){
      if (type == ":") cont(poplex, statement);
      else pass(maybeoperator, expect(";"), poplex);
    }
    // Property names need to have their style adjusted -- the
    // tokenizer thinks they are variables.
    function property(type){
      if (type == "variable") {mark("js-property"); cont();}
    }
    // This parses a property and its value in an object literal.
    function objprop(type){
      if (type == "variable") mark("js-property");
      if (atomicTypes.hasOwnProperty(type)) cont(expect(":"), expression);
    }
    // Parses a comma-separated list of the things that are recognized
    // by the 'what' argument.
    function commasep(what, end){
      function proceed(type) {
        if (type == ",") cont(what, proceed);
        else if (type == end) cont();
        else cont(expect(end));
      }
      return function commaSeparated(type) {
        if (type == end) cont();
        else pass(what, proceed);
      };
    }
    // Look for statements until a closing brace is found.
    function block(type){
      if (type == "}") cont();
      else pass(statement, block);
    }
    // Variable definitions are split into two actions -- 1 looks for
    // a name or the end of the definition, 2 looks for an '=' sign or
    // a comma.
    function vardef1(type, value){
      if (type == "variable"){register(value); cont(vardef2);}
      else cont();
    }
    function vardef2(type, value){
      if (value == "=") cont(expression, vardef2);
      else if (type == ",") cont(vardef1);
    }
    // For loops.
    function forspec1(type){
      if (type == "var") cont(vardef1, forspec2);
      else if (type == ";") pass(forspec2);
      else if (type == "variable") cont(formaybein);
      else pass(forspec2);
    }
    function formaybein(type, value){
      if (value == "in") cont(expression);
      else cont(maybeoperator, forspec2);
    }
    function forspec2(type, value){
      if (type == ";") cont(forspec3);
      else if (value == "in") cont(expression);
      else cont(expression, expect(";"), forspec3);
    }
    function forspec3(type) {
      if (type == ")") pass();
      else cont(expression);
    }
    // A function definition creates a new context, and the variables
    // in its argument list have to be added to this context.
    function functiondef(type, value){
      if (type == "variable"){register(value); cont(functiondef);}
      else if (type == "(") cont(pushcontext, commasep(funarg, ")"), statement, popcontext);
    }
    function funarg(type, value){
      if (type == "variable"){register(value); cont();}
    }

    return parser;
  }

  return {
    make: parseJS,
    electricChars: "{}:",
    configure: function(obj) {
      if (obj.json != null) json = obj.json;
    }
  };
})();
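
// Minimal usage sketch: the object returned above is the parser
// interface that CodeMirror consumes (make, electricChars, configure).
// The json option defined near the top switches brace handling to
// JSON-style indentation:
//
//   JSParser.configure({json: true});
//   highlightText('{"a": [1, 2]}', document.getElementById("out"), JSParser);
//
// ("out" is a hypothetical element id; highlightText comes from highlight.js)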
@ -0,0 +1,162 @@
var SparqlParser = Editor.Parser = (function() {
  function wordRegexp(words) {
    return new RegExp("^(?:" + words.join("|") + ")$", "i");
  }
  var ops = wordRegexp(["str", "lang", "langmatches", "datatype", "bound", "sameterm", "isiri", "isuri",
                        "isblank", "isliteral", "union", "a"]);
  var keywords = wordRegexp(["base", "prefix", "select", "distinct", "reduced", "construct", "describe",
                             "ask", "from", "named", "where", "order", "limit", "offset", "filter", "optional",
                             "graph", "by", "asc", "desc"]);
  var operatorChars = /[*+\-<>=&|]/;

  var tokenizeSparql = (function() {
    function normal(source, setState) {
      var ch = source.next();
      if (ch == "$" || ch == "?") {
        source.nextWhileMatches(/[\w\d]/);
        return "sp-var";
      }
      else if (ch == "<" && !source.matches(/[\s\u00a0=]/)) {
        source.nextWhileMatches(/[^\s\u00a0>]/);
        if (source.equals(">")) source.next();
        return "sp-uri";
      }
      else if (ch == "\"" || ch == "'") {
        setState(inLiteral(ch));
        return null;
      }
      else if (/[{}\(\),\.;\[\]]/.test(ch)) {
        return "sp-punc";
      }
      else if (ch == "#") {
        while (!source.endOfLine()) source.next();
        return "sp-comment";
      }
      else if (operatorChars.test(ch)) {
        source.nextWhileMatches(operatorChars);
        return "sp-operator";
      }
      else if (ch == ":") {
        source.nextWhileMatches(/[\w\d\._\-]/);
        return "sp-prefixed";
      }
      else {
        source.nextWhileMatches(/[_\w\d]/);
        if (source.equals(":")) {
          source.next();
          source.nextWhileMatches(/[\w\d_\-]/);
          return "sp-prefixed";
        }
        var word = source.get(), type;
        if (ops.test(word))
          type = "sp-operator";
        else if (keywords.test(word))
          type = "sp-keyword";
        else
          type = "sp-word";
        return {style: type, content: word};
      }
    }

    function inLiteral(quote) {
      return function(source, setState) {
        var escaped = false;
        while (!source.endOfLine()) {
          var ch = source.next();
          if (ch == quote && !escaped) {
            setState(normal);
            break;
          }
          escaped = !escaped && ch == "\\";
        }
        return "sp-literal";
      };
    }

    return function(source, startState) {
      return tokenizer(source, startState || normal);
    };
  })();

  function indentSparql(context) {
    return function(nextChars) {
      var firstChar = nextChars && nextChars.charAt(0);
      if (/[\]\}]/.test(firstChar))
        while (context && context.type == "pattern") context = context.prev;

      var closing = context && firstChar == matching[context.type];
      if (!context)
        return 0;
      else if (context.type == "pattern")
        return context.col;
      else if (context.align)
        return context.col - (closing ? context.width : 0);
      else
        return context.indent + (closing ? 0 : indentUnit);
    };
  }

  function parseSparql(source) {
    var tokens = tokenizeSparql(source);
    var context = null, indent = 0, col = 0;
    function pushContext(type, width) {
      context = {prev: context, indent: indent, col: col, type: type, width: width};
    }
    function popContext() {
      context = context.prev;
    }

    var iter = {
      next: function() {
        var token = tokens.next(), type = token.style, content = token.content, width = token.value.length;

        if (content == "\n") {
          token.indentation = indentSparql(context);
          indent = col = 0;
          if (context && context.align == null) context.align = false;
        }
        else if (type == "whitespace" && col == 0) {
          indent = width;
        }
        else if (type != "sp-comment" && context && context.align == null) {
          context.align = true;
        }

        if (content != "\n") col += width;

        if (/[\[\{\(]/.test(content)) {
          pushContext(content, width);
        }
        else if (/[\]\}\)]/.test(content)) {
          while (context && context.type == "pattern")
            popContext();
          if (context && content == matching[context.type])
            popContext();
        }
        else if (content == "." && context && context.type == "pattern") {
          popContext();
        }
        else if ((type == "sp-word" || type == "sp-prefixed" || type == "sp-uri" || type == "sp-var" || type == "sp-literal") &&
                 context && /[\{\[]/.test(context.type)) {
          pushContext("pattern", width);
        }

        return token;
      },

      copy: function() {
        var _context = context, _indent = indent, _col = col, _tokenState = tokens.state;
        return function(source) {
          tokens = tokenizeSparql(source, _tokenState);
          context = _context;
          indent = _indent;
          col = _col;
          return iter;
        };
      }
    };
    return iter;
  }

  return {make: parseSparql, electricChars: "}]"};
})();
@ -0,0 +1,286 @@
/* This file defines an XML parser, with a few kludges to make it
 * useable for HTML. autoSelfClosers defines a set of tag names that
 * are expected to not have a closing tag, and doNotIndent specifies
 * the tags inside of which no indentation should happen (see Config
 * object). These can be disabled by passing the editor an object like
 * {useHTMLKludges: false} as parserConfig option.
 */

var XMLParser = Editor.Parser = (function() {
  var Kludges = {
    autoSelfClosers: {"br": true, "img": true, "hr": true, "link": true, "input": true,
                      "meta": true, "col": true, "frame": true, "base": true, "area": true},
    doNotIndent: {"pre": true, "!cdata": true}
  };
  var NoKludges = {autoSelfClosers: {}, doNotIndent: {"!cdata": true}};
  var UseKludges = Kludges;
  var alignCDATA = false;

  // Simple stateful tokenizer for XML documents. Returns a
  // MochiKit-style iterator, with a state property that contains a
  // function encapsulating the current state. See tokenize.js.
  var tokenizeXML = (function() {
    function inText(source, setState) {
      var ch = source.next();
      if (ch == "<") {
        if (source.equals("!")) {
          source.next();
          if (source.equals("[")) {
            if (source.lookAhead("[CDATA[", true)) {
              setState(inBlock("xml-cdata", "]]>"));
              return null;
            }
            else {
              return "xml-text";
            }
          }
          else if (source.lookAhead("--", true)) {
            setState(inBlock("xml-comment", "-->"));
            return null;
          }
          else {
            return "xml-text";
          }
        }
        else if (source.equals("?")) {
          source.next();
          source.nextWhileMatches(/[\w\._\-]/);
          setState(inBlock("xml-processing", "?>"));
          return "xml-processing";
        }
        else {
          if (source.equals("/")) source.next();
          setState(inTag);
          return "xml-punctuation";
        }
      }
      else if (ch == "&") {
        while (!source.endOfLine()) {
          if (source.next() == ";")
            break;
        }
        return "xml-entity";
      }
      else {
        source.nextWhileMatches(/[^&<\n]/);
        return "xml-text";
      }
    }

    function inTag(source, setState) {
      var ch = source.next();
      if (ch == ">") {
        setState(inText);
        return "xml-punctuation";
      }
      else if (/[?\/]/.test(ch) && source.equals(">")) {
        source.next();
        setState(inText);
        return "xml-punctuation";
      }
      else if (ch == "=") {
        return "xml-punctuation";
      }
      else if (/[\'\"]/.test(ch)) {
        setState(inAttribute(ch));
        return null;
      }
      else {
        source.nextWhileMatches(/[^\s\u00a0=<>\"\'\/?]/);
        return "xml-name";
      }
    }

    function inAttribute(quote) {
      return function(source, setState) {
        while (!source.endOfLine()) {
          if (source.next() == quote) {
            setState(inTag);
            break;
          }
        }
        return "xml-attribute";
      };
    }

    function inBlock(style, terminator) {
      return function(source, setState) {
        while (!source.endOfLine()) {
          if (source.lookAhead(terminator, true)) {
            setState(inText);
            break;
          }
          source.next();
        }
        return style;
      };
    }

    return function(source, startState) {
      return tokenizer(source, startState || inText);
    };
  })();

  // The parser. The structure of this function largely follows that of
  // parseJavaScript in parsejavascript.js (there is actually a bit more
  // shared code than I'd like), but it is quite a bit simpler.
  function parseXML(source) {
    var tokens = tokenizeXML(source), token;
    var cc = [base];
    var tokenNr = 0, indented = 0;
    var currentTag = null, context = null;
    var consume;

    function push(fs) {
      for (var i = fs.length - 1; i >= 0; i--)
        cc.push(fs[i]);
    }
    function cont() {
      push(arguments);
      consume = true;
    }
    function pass() {
      push(arguments);
      consume = false;
    }

    function markErr() {
      token.style += " xml-error";
    }
    function expect(text) {
      return function(style, content) {
        if (content == text) cont();
        else {markErr(); cont(arguments.callee);}
      };
    }

    function pushContext(tagname, startOfLine) {
      var noIndent = UseKludges.doNotIndent.hasOwnProperty(tagname) || (context && context.noIndent);
      context = {prev: context, name: tagname, indent: indented, startOfLine: startOfLine, noIndent: noIndent};
    }
    function popContext() {
      context = context.prev;
    }
    function computeIndentation(baseContext) {
      return function(nextChars, current) {
        var context = baseContext;
        if (context && context.noIndent)
          return current;
        if (alignCDATA && /<!\[CDATA\[/.test(nextChars))
          return 0;
        if (context && /^<\//.test(nextChars))
          context = context.prev;
        while (context && !context.startOfLine)
          context = context.prev;
        if (context)
          return context.indent + indentUnit;
        else
          return 0;
      };
    }

    function base() {
      return pass(element, base);
    }
    var harmlessTokens = {"xml-text": true, "xml-entity": true, "xml-comment": true, "xml-processing": true};
    function element(style, content) {
      if (content == "<") cont(tagname, attributes, endtag(tokenNr == 1));
      else if (content == "</") cont(closetagname, expect(">"));
      else if (style == "xml-cdata") {
        if (!context || context.name != "!cdata") pushContext("!cdata");
        if (/\]\]>$/.test(content)) popContext();
        cont();
      }
      else if (harmlessTokens.hasOwnProperty(style)) cont();
      else {markErr(); cont();}
    }
    function tagname(style, content) {
      if (style == "xml-name") {
        currentTag = content.toLowerCase();
        token.style = "xml-tagname";
        cont();
      }
      else {
        currentTag = null;
        pass();
      }
    }
    function closetagname(style, content) {
      if (style == "xml-name") {
        token.style = "xml-tagname";
        if (context && content.toLowerCase() == context.name) popContext();
        else markErr();
      }
      cont();
    }
    function endtag(startOfLine) {
      return function(style, content) {
        if (content == "/>" || (content == ">" && UseKludges.autoSelfClosers.hasOwnProperty(currentTag))) cont();
        else if (content == ">") {pushContext(currentTag, startOfLine); cont();}
        else {markErr(); cont(arguments.callee);}
      };
    }
    function attributes(style) {
      if (style == "xml-name") {token.style = "xml-attname"; cont(attribute, attributes);}
      else pass();
    }
    function attribute(style, content) {
      if (content == "=") cont(value);
      else if (content == ">" || content == "/>") pass(endtag);
      else pass();
    }
    function value(style) {
      if (style == "xml-attribute") cont(value);
      else pass();
    }

    return {
      indentation: function() {return indented;},

      next: function(){
        token = tokens.next();
        if (token.style == "whitespace" && tokenNr == 0)
          indented = token.value.length;
        else
          tokenNr++;
        if (token.content == "\n") {
          indented = tokenNr = 0;
          token.indentation = computeIndentation(context);
        }

        if (token.style == "whitespace" || token.type == "xml-comment")
          return token;

        while(true){
          consume = false;
          cc.pop()(token.style, token.content);
          if (consume) return token;
        }
      },

      copy: function(){
        var _cc = cc.concat([]), _tokenState = tokens.state, _context = context;
        var parser = this;

        return function(input){
          cc = _cc.concat([]);
          tokenNr = indented = 0;
          context = _context;
          tokens = tokenizeXML(input, _tokenState);
          return parser;
        };
      }
    };
  }

  return {
    make: parseXML,
    electricChars: "/",
    configure: function(config) {
      if (config.useHTMLKludges != null)
        UseKludges = config.useHTMLKludges ? Kludges : NoKludges;
      if (config.alignCDATA)
        alignCDATA = config.alignCDATA;
    }
  };
})();
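
// Minimal usage sketch: the kludges described in the header comment
// are toggled through configure (HTMLMixedParser switches them on for
// HTML); strict XML mode simply disables them. "out" is a
// hypothetical element id.
//
//   XMLParser.configure({useHTMLKludges: false, alignCDATA: true});
//   highlightText("<root><child/></root>", document.getElementById("out"), XMLParser);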
@ -0,0 +1,619 @@
/* Functionality for finding, storing, and restoring selections
 *
 * This does not provide a generic API, just the minimal functionality
 * required by the CodeMirror system.
 */

// Namespace object.
var select = {};

(function() {
  select.ie_selection = document.selection && document.selection.createRangeCollection;

  // Find the 'top-level' (defined as 'a direct child of the node
  // passed as the top argument') node that the given node is
  // contained in. Return null if the given node is not inside the top
  // node.
  function topLevelNodeAt(node, top) {
    while (node && node.parentNode != top)
      node = node.parentNode;
    return node;
  }

  // Find the top-level node that contains the node before this one.
  function topLevelNodeBefore(node, top) {
    while (!node.previousSibling && node.parentNode != top)
      node = node.parentNode;
    return topLevelNodeAt(node.previousSibling, top);
  }

  var fourSpaces = "\u00a0\u00a0\u00a0\u00a0";

  select.scrollToNode = function(element) {
    if (!element) return;
    var doc = element.ownerDocument, body = doc.body,
        win = (doc.defaultView || doc.parentWindow),
        html = doc.documentElement,
        atEnd = !element.nextSibling || !element.nextSibling.nextSibling
                || !element.nextSibling.nextSibling.nextSibling;
    // In Opera (and recent Webkit versions), BR elements *always*
    // have a offsetTop property of zero.
    var compensateHack = 0;
    while (element && !element.offsetTop) {
      compensateHack++;
      element = element.previousSibling;
    }
    // atEnd is another kludge for these browsers -- if the cursor is
    // at the end of the document, and the node doesn't have an
    // offset, just scroll to the end.
    if (compensateHack == 0) atEnd = false;

    var y = compensateHack * (element ? element.offsetHeight : 0), x = 0, pos = element;
    while (pos && pos.offsetParent) {
      y += pos.offsetTop;
      // Don't count X offset for <br> nodes
      if (!isBR(pos))
        x += pos.offsetLeft;
      pos = pos.offsetParent;
    }

    var scroll_x = body.scrollLeft || html.scrollLeft || 0,
        scroll_y = body.scrollTop || html.scrollTop || 0,
        screen_x = x - scroll_x, screen_y = y - scroll_y, scroll = false;

    if (screen_x < 0 || screen_x > (win.innerWidth || html.clientWidth || 0)) {
      scroll_x = x;
      scroll = true;
    }
    if (screen_y < 0 || atEnd || screen_y > (win.innerHeight || html.clientHeight || 0) - 50) {
      scroll_y = atEnd ? 1e6 : y;
      scroll = true;
    }
    if (scroll) win.scrollTo(scroll_x, scroll_y);
  };

  select.scrollToCursor = function(container) {
    select.scrollToNode(select.selectionTopNode(container, true) || container.firstChild);
  };

  // Used to prevent restoring a selection when we do not need to.
  var currentSelection = null;

  select.snapshotChanged = function() {
    if (currentSelection) currentSelection.changed = true;
  };

  // This is called by the code in editor.js whenever it is replacing
  // a text node. The function sees whether the given oldNode is part
  // of the current selection, and updates this selection if it is.
  // Because nodes are often only partially replaced, the length of
  // the part that gets replaced has to be taken into account -- the
  // selection might stay in the oldNode if the newNode is smaller
  // than the selection's offset. The offset argument is needed in
  // case the selection does move to the new object, and the given
  // length is not the whole length of the new node (part of it might
  // have been used to replace another node).
  select.snapshotReplaceNode = function(from, to, length, offset) {
    if (!currentSelection) return;

    function replace(point) {
      if (from == point.node) {
        currentSelection.changed = true;
        if (length && point.offset > length) {
          point.offset -= length;
        }
        else {
          point.node = to;
          point.offset += (offset || 0);
        }
      }
    }
    replace(currentSelection.start);
    replace(currentSelection.end);
  };

  select.snapshotMove = function(from, to, distance, relative, ifAtStart) {
    if (!currentSelection) return;

    function move(point) {
      if (from == point.node && (!ifAtStart || point.offset == 0)) {
        currentSelection.changed = true;
        point.node = to;
        if (relative) point.offset = Math.max(0, point.offset + distance);
        else point.offset = distance;
      }
    }
    move(currentSelection.start);
    move(currentSelection.end);
  };
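
  // Minimal sketch of the calling pattern (the editor.js code that
  // drives these functions is assumed, not shown here):
  //
  //   select.markSelection(win);        // snapshot the current selection
  //   // ... mutate the document, reporting node replacements through
  //   // select.snapshotReplaceNode(oldNode, newNode, length, offset)
  //   // and moves through select.snapshotMove(...) ...
  //   select.selectMarked();            // restore the updated selection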
|
||||
|
||||
// Most functions are defined in two ways, one for the IE selection
|
||||
// model, one for the W3C one.
|
||||
if (select.ie_selection) {
|
||||
function selectionNode(win, start) {
|
||||
var range = win.document.selection.createRange();
|
||||
range.collapse(start);
|
||||
|
||||
function nodeAfter(node) {
|
||||
var found = null;
|
||||
while (!found && node) {
|
||||
found = node.nextSibling;
|
||||
node = node.parentNode;
|
||||
}
|
||||
return nodeAtStartOf(found);
|
||||
}
|
||||
|
||||
function nodeAtStartOf(node) {
|
||||
while (node && node.firstChild) node = node.firstChild;
|
||||
return {node: node, offset: 0};
|
||||
}
|
||||
|
||||
var containing = range.parentElement();
|
||||
if (!isAncestor(win.document.body, containing)) return null;
|
||||
if (!containing.firstChild) return nodeAtStartOf(containing);
|
||||
|
||||
var working = range.duplicate();
|
||||
working.moveToElementText(containing);
|
||||
working.collapse(true);
|
||||
for (var cur = containing.firstChild; cur; cur = cur.nextSibling) {
|
||||
if (cur.nodeType == 3) {
|
||||
var size = cur.nodeValue.length;
|
||||
working.move("character", size);
|
||||
}
|
||||
else {
|
||||
working.moveToElementText(cur);
|
||||
working.collapse(false);
|
||||
}
|
||||
|
||||
var dir = range.compareEndPoints("StartToStart", working);
|
||||
if (dir == 0) return nodeAfter(cur);
|
||||
if (dir == 1) continue;
|
||||
if (cur.nodeType != 3) return nodeAtStartOf(cur);
|
||||
|
||||
working.setEndPoint("StartToEnd", range);
|
||||
return {node: cur, offset: size - working.text.length};
|
||||
}
|
||||
return nodeAfter(containing);
|
||||
}
|
||||
|
||||
select.markSelection = function(win) {
|
||||
currentSelection = null;
|
||||
var sel = win.document.selection;
|
||||
if (!sel) return;
|
||||
var start = selectionNode(win, true),
|
||||
end = selectionNode(win, false);
|
||||
if (!start || !end) return;
|
||||
currentSelection = {start: start, end: end, window: win, changed: false};
|
||||
};
|
||||
|
||||
select.selectMarked = function() {
|
||||
if (!currentSelection || !currentSelection.changed) return;
|
||||
var win = currentSelection.window, doc = win.document;
|
||||
|
||||
function makeRange(point) {
|
||||
var range = doc.body.createTextRange(),
|
||||
node = point.node;
|
||||
if (!node) {
|
||||
range.moveToElementText(currentSelection.window.document.body);
|
||||
range.collapse(false);
|
||||
}
|
||||
else if (node.nodeType == 3) {
|
||||
range.moveToElementText(node.parentNode);
|
||||
var offset = point.offset;
|
||||
while (node.previousSibling) {
|
||||
node = node.previousSibling;
|
||||
offset += (node.innerText || "").length;
|
||||
}
|
||||
range.move("character", offset);
|
||||
}
|
||||
else {
|
||||
range.moveToElementText(node);
|
||||
range.collapse(true);
|
||||
}
|
||||
return range;
|
||||
}
|
||||
|
||||
var start = makeRange(currentSelection.start), end = makeRange(currentSelection.end);
|
||||
start.setEndPoint("StartToEnd", end);
|
||||
start.select();
|
||||
};
|
||||
|
||||
// Get the top-level node that one end of the cursor is inside or
|
||||
// after. Note that this returns false for 'no cursor', and null
|
||||
// for 'start of document'.
|
||||
select.selectionTopNode = function(container, start) {
|
||||
var selection = container.ownerDocument.selection;
|
||||
if (!selection) return false;
|
||||
|
||||
var range = selection.createRange(), range2 = range.duplicate();
|
||||
range.collapse(start);
|
||||
var around = range.parentElement();
|
||||
if (around && isAncestor(container, around)) {
|
||||
// Only use this node if the selection is not at its start.
|
||||
range2.moveToElementText(around);
|
||||
if (range.compareEndPoints("StartToStart", range2) == 1)
|
||||
return topLevelNodeAt(around, container);
|
||||
}
|
||||
|
||||
// Move the start of a range to the start of a node,
|
||||
// compensating for the fact that you can't call
|
||||
// moveToElementText with text nodes.
|
||||
function moveToNodeStart(range, node) {
|
||||
if (node.nodeType == 3) {
|
||||
var count = 0, cur = node.previousSibling;
|
||||
while (cur && cur.nodeType == 3) {
|
||||
count += cur.nodeValue.length;
|
||||
cur = cur.previousSibling;
|
||||
}
|
||||
if (cur) {
|
||||
try{range.moveToElementText(cur);}
|
||||
catch(e){return false;}
|
||||
range.collapse(false);
|
||||
}
|
||||
else range.moveToElementText(node.parentNode);
|
||||
if (count) range.move("character", count);
|
||||
}
|
||||
else {
|
||||
try{range.moveToElementText(node);}
|
||||
catch(e){return false;}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Do a binary search through the container object, comparing
|
||||
// the start of each node to the selection
|
||||
var start = 0, end = container.childNodes.length - 1;
|
||||
while (start < end) {
|
||||
var middle = Math.ceil((end + start) / 2), node = container.childNodes[middle];
|
||||
if (!node) return false; // Don't ask. IE6 manages this sometimes.
|
||||
if (!moveToNodeStart(range2, node)) return false;
|
||||
if (range.compareEndPoints("StartToStart", range2) == 1)
|
||||
start = middle;
|
||||
else
|
||||
end = middle - 1;
|
||||
}
|
||||
return container.childNodes[start] || null;
|
||||
};
|
||||
|
||||
// Place the cursor after this.start. This is only useful when
|
||||
// manually moving the cursor instead of restoring it to its old
|
||||
// position.
|
||||
select.focusAfterNode = function(node, container) {
|
||||
var range = container.ownerDocument.body.createTextRange();
|
||||
range.moveToElementText(node || container);
|
||||
range.collapse(!node);
|
||||
range.select();
|
||||
};
|
||||
|
||||
select.somethingSelected = function(win) {
|
||||
var sel = win.document.selection;
|
||||
return sel && (sel.createRange().text != "");
|
||||
};
|
||||
|
||||
function insertAtCursor(window, html) {
|
||||
var selection = window.document.selection;
|
||||
if (selection) {
|
||||
var range = selection.createRange();
|
||||
range.pasteHTML(html);
|
||||
range.collapse(false);
|
||||
range.select();
|
||||
}
|
||||
}
|
||||
|
||||
// Used to normalize the effect of the enter key, since browsers
|
||||
// do widely different things when pressing enter in designMode.
|
||||
select.insertNewlineAtCursor = function(window) {
|
||||
insertAtCursor(window, "<br>");
|
||||
};
|
||||
|
||||
select.insertTabAtCursor = function(window) {
|
||||
insertAtCursor(window, fourSpaces);
|
||||
};
|
||||
|
||||
// Get the BR node at the start of the line on which the cursor
|
||||
// currently is, and the offset into the line. Returns null as
|
||||
// node if cursor is on first line.
|
||||
select.cursorPos = function(container, start) {
|
||||
var selection = container.ownerDocument.selection;
|
||||
if (!selection) return null;
|
||||
|
||||
var topNode = select.selectionTopNode(container, start);
|
||||
while (topNode && !isBR(topNode))
|
||||
topNode = topNode.previousSibling;
|
||||
|
||||
var range = selection.createRange(), range2 = range.duplicate();
|
||||
range.collapse(start);
|
||||
if (topNode) {
|
||||
range2.moveToElementText(topNode);
|
||||
range2.collapse(false);
|
||||
}
|
||||
else {
|
||||
// When nothing is selected, we can get all kinds of funky errors here.
|
||||
try { range2.moveToElementText(container); }
|
||||
catch (e) { return null; }
|
||||
range2.collapse(true);
|
||||
}
|
||||
range.setEndPoint("StartToStart", range2);
|
||||
|
||||
return {node: topNode, offset: range.text.length};
|
||||
};
|
||||
|
||||
select.setCursorPos = function(container, from, to) {
|
||||
function rangeAt(pos) {
|
||||
var range = container.ownerDocument.body.createTextRange();
|
||||
if (!pos.node) {
|
||||
range.moveToElementText(container);
|
||||
range.collapse(true);
|
||||
}
|
||||
else {
|
||||
range.moveToElementText(pos.node);
|
||||
range.collapse(false);
|
||||
}
|
||||
range.move("character", pos.offset);
|
||||
return range;
|
||||
}
|
||||
|
||||
var range = rangeAt(from);
|
||||
if (to && to != from)
|
||||
range.setEndPoint("EndToEnd", rangeAt(to));
|
||||
range.select();
|
||||
};
|
||||
|
||||
// Some hacks for storing and re-storing the selection when the editor loses and regains focus.
|
||||
select.getBookmark = function (container) {
|
||||
var from = select.cursorPos(container, true), to = select.cursorPos(container, false);
|
||||
if (from && to) return {from: from, to: to};
|
||||
};
|
||||
|
||||
// Restore a stored selection.
|
||||
select.setBookmark = function(container, mark) {
|
||||
if (!mark) return;
|
||||
select.setCursorPos(container, mark.from, mark.to);
|
||||
};
|
||||
}
|
||||
// W3C model
|
||||
else {
|
||||
// Store start and end nodes, and offsets within these, and refer
|
||||
// back to the selection object from those nodes, so that this
|
||||
// object can be updated when the nodes are replaced before the
|
||||
// selection is restored.
|
||||
select.markSelection = function (win) {
|
||||
var selection = win.getSelection();
|
||||
if (!selection || selection.rangeCount == 0)
|
||||
return (currentSelection = null);
|
||||
var range = selection.getRangeAt(0);
|
||||
|
||||
currentSelection = {
|
||||
start: {node: range.startContainer, offset: range.startOffset},
|
||||
end: {node: range.endContainer, offset: range.endOffset},
|
||||
window: win,
|
||||
changed: false
|
||||
};
|
||||
|
||||
// We want the nodes right at the cursor, not one of their
|
||||
// ancestors with a suitable offset. This goes down the DOM tree
|
||||
// until a 'leaf' is reached (or is it *up* the DOM tree?).
|
||||
function normalize(point){
|
||||
while (point.node.nodeType != 3 && !isBR(point.node)) {
|
||||
var newNode = point.node.childNodes[point.offset] || point.node.nextSibling;
|
||||
point.offset = 0;
|
||||
while (!newNode && point.node.parentNode) {
|
||||
point.node = point.node.parentNode;
|
||||
newNode = point.node.nextSibling;
|
||||
}
|
||||
point.node = newNode;
|
||||
if (!newNode)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
normalize(currentSelection.start);
|
||||
normalize(currentSelection.end);
|
||||
};
|
||||
|
||||
select.selectMarked = function () {
|
||||
var cs = currentSelection;
|
||||
// on webkit-based browsers, it is apparently possible that the
// selection gets reset even when a node that is not one of the
// endpoints gets messed with. the most common situation where
// this occurs is when a selection is deleted or overwritten. we
// check for that here.
|
||||
function focusIssue() {
|
||||
return cs.start.node == cs.end.node && cs.start.offset == 0 && cs.end.offset == 0;
|
||||
}
|
||||
if (!cs || !(cs.changed || (webkit && focusIssue()))) return;
|
||||
var win = cs.window, range = win.document.createRange();
|
||||
|
||||
function setPoint(point, which) {
|
||||
if (point.node) {
|
||||
// Some magic to generalize the setting of the start and end
|
||||
// of a range.
|
||||
if (point.offset == 0)
|
||||
range["set" + which + "Before"](point.node);
|
||||
else
|
||||
range["set" + which](point.node, point.offset);
|
||||
}
|
||||
else {
|
||||
range.setStartAfter(win.document.body.lastChild || win.document.body);
|
||||
}
|
||||
}
|
||||
|
||||
setPoint(cs.end, "End");
|
||||
setPoint(cs.start, "Start");
|
||||
selectRange(range, win);
|
||||
};
|
||||
|
||||
// Helper for selecting a range object.
|
||||
function selectRange(range, window) {
|
||||
var selection = window.getSelection();
|
||||
selection.removeAllRanges();
|
||||
selection.addRange(range);
|
||||
}
|
||||
function selectionRange(window) {
|
||||
var selection = window.getSelection();
|
||||
if (!selection || selection.rangeCount == 0)
|
||||
return false;
|
||||
else
|
||||
return selection.getRangeAt(0);
|
||||
}
|
||||
|
||||
// Finding the top-level node at the cursor in the W3C is, as you
|
||||
// can see, quite an involved process.
|
||||
select.selectionTopNode = function(container, start) {
|
||||
var range = selectionRange(container.ownerDocument.defaultView);
|
||||
if (!range) return false;
|
||||
|
||||
var node = start ? range.startContainer : range.endContainer;
|
||||
var offset = start ? range.startOffset : range.endOffset;
|
||||
// Work around (yet another) bug in Opera's selection model.
|
||||
if (window.opera && !start && range.endContainer == container && range.endOffset == range.startOffset + 1 &&
|
||||
container.childNodes[range.startOffset] && isBR(container.childNodes[range.startOffset]))
|
||||
offset--;
|
||||
|
||||
// For text nodes, we look at the node itself if the cursor is
|
||||
// inside, or at the node before it if the cursor is at the
|
||||
// start.
|
||||
if (node.nodeType == 3){
|
||||
if (offset > 0)
|
||||
return topLevelNodeAt(node, container);
|
||||
else
|
||||
return topLevelNodeBefore(node, container);
|
||||
}
|
||||
// Occasionally, browsers will return the HTML node as
|
||||
// selection. If the offset points before the body, we take the
// start of the frame ('after null'), otherwise, we take the last node.
|
||||
else if (node.nodeName.toUpperCase() == "HTML") {
|
||||
return (offset == 1 ? null : container.lastChild);
|
||||
}
|
||||
// If the given node is our 'container', we just look up the
|
||||
// correct node by using the offset.
|
||||
else if (node == container) {
|
||||
return (offset == 0) ? null : node.childNodes[offset - 1];
|
||||
}
|
||||
// In any other case, we have a regular node. If the cursor is
|
||||
// at the end of the node, we use the node itself, if it is at
|
||||
// the start, we use the node before it, and in any other
|
||||
// case, we look up the child before the cursor and use that.
|
||||
else {
|
||||
if (offset == node.childNodes.length)
|
||||
return topLevelNodeAt(node, container);
|
||||
else if (offset == 0)
|
||||
return topLevelNodeBefore(node, container);
|
||||
else
|
||||
return topLevelNodeAt(node.childNodes[offset - 1], container);
|
||||
}
|
||||
};
|
||||
|
||||
select.focusAfterNode = function(node, container) {
|
||||
var win = container.ownerDocument.defaultView,
|
||||
range = win.document.createRange();
|
||||
range.setStartBefore(container.firstChild || container);
|
||||
// In Opera, setting the end of a range at the end of a line
|
||||
// (before a BR) will cause the cursor to appear on the next
|
||||
// line, so we set the end inside of the start node when
|
||||
// possible.
|
||||
if (node && !node.firstChild)
|
||||
range.setEndAfter(node);
|
||||
else if (node)
|
||||
range.setEnd(node, node.childNodes.length);
|
||||
else
|
||||
range.setEndBefore(container.firstChild || container);
|
||||
range.collapse(false);
|
||||
selectRange(range, win);
|
||||
};
|
||||
|
||||
select.somethingSelected = function(win) {
|
||||
var range = selectionRange(win);
|
||||
return range && !range.collapsed;
|
||||
};
|
||||
|
||||
function insertNodeAtCursor(window, node) {
|
||||
var range = selectionRange(window);
|
||||
if (!range) return;
|
||||
|
||||
range.deleteContents();
|
||||
range.insertNode(node);
|
||||
webkitLastLineHack(window.document.body);
|
||||
range = window.document.createRange();
|
||||
range.selectNode(node);
|
||||
range.collapse(false);
|
||||
selectRange(range, window);
|
||||
}
|
||||
|
||||
select.insertNewlineAtCursor = function(window) {
|
||||
insertNodeAtCursor(window, window.document.createElement("BR"));
|
||||
};
|
||||
|
||||
select.insertTabAtCursor = function(window) {
|
||||
insertNodeAtCursor(window, window.document.createTextNode(fourSpaces));
|
||||
};
|
||||
|
||||
select.cursorPos = function(container, start) {
|
||||
var range = selectionRange(window);
|
||||
if (!range) return;
|
||||
|
||||
var topNode = select.selectionTopNode(container, start);
|
||||
while (topNode && !isBR(topNode))
|
||||
topNode = topNode.previousSibling;
|
||||
|
||||
range = range.cloneRange();
|
||||
range.collapse(start);
|
||||
if (topNode)
|
||||
range.setStartAfter(topNode);
|
||||
else
|
||||
range.setStartBefore(container);
|
||||
return {node: topNode, offset: range.toString().length};
|
||||
};
|
||||
|
||||
select.setCursorPos = function(container, from, to) {
|
||||
var win = container.ownerDocument.defaultView,
|
||||
range = win.document.createRange();
|
||||
|
||||
function setPoint(node, offset, side) {
|
||||
if (offset == 0 && node && !node.nextSibling) {
|
||||
range["set" + side + "After"](node);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!node)
|
||||
node = container.firstChild;
|
||||
else
|
||||
node = node.nextSibling;
|
||||
|
||||
if (!node) return;
|
||||
|
||||
if (offset == 0) {
|
||||
range["set" + side + "Before"](node);
|
||||
return true;
|
||||
}
|
||||
|
||||
var backlog = [];
|
||||
function decompose(node) {
|
||||
if (node.nodeType == 3)
|
||||
backlog.push(node);
|
||||
else
|
||||
forEach(node.childNodes, decompose);
|
||||
}
|
||||
while (true) {
|
||||
while (node && !backlog.length) {
|
||||
decompose(node);
|
||||
node = node.nextSibling;
|
||||
}
|
||||
var cur = backlog.shift();
|
||||
if (!cur) return false;
|
||||
|
||||
var length = cur.nodeValue.length;
|
||||
if (length >= offset) {
|
||||
range["set" + side](cur, offset);
|
||||
return true;
|
||||
}
|
||||
offset -= length;
|
||||
}
|
||||
}
|
||||
|
||||
to = to || from;
|
||||
if (setPoint(to.node, to.offset, "End") && setPoint(from.node, from.offset, "Start"))
|
||||
selectRange(range, win);
|
||||
};
|
||||
}
|
||||
})();
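// Usage sketch (illustrative, not part of the original source): the
// typical pattern around DOM surgery is to mark the selection,
// mutate the nodes, and then restore it. 'win' stands for the
// editing frame's window.
//   select.markSelection(win);
//   /* ...replace or re-highlight nodes... */
//   select.selectMarked();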
@ -0,0 +1,140 @@
/* String streams are the things fed to parsers (which can feed them
|
||||
* to a tokenizer if they want). They provide peek and next methods
|
||||
* for looking at the current character (next 'consumes' this
|
||||
* character, peek does not), and a get method for retrieving all the
|
||||
* text that was consumed since the last time get was called.
|
||||
*
|
||||
* An easy mistake to make is to let a StopIteration exception finish
|
||||
* the token stream while there are still characters pending in the
|
||||
* string stream (hitting the end of the buffer while parsing a
|
||||
* token). To make it easier to detect such errors, the stringstreams
|
||||
* throw an exception when this happens.
|
||||
*/
|
||||
|
||||
// Make a string stream out of an iterator that returns strings.
|
||||
// This is applied to the result of traverseDOM (see codemirror.js),
|
||||
// and the resulting stream is fed to the parser.
|
||||
var stringStream = function(source){
|
||||
// String that's currently being iterated over.
|
||||
var current = "";
|
||||
// Position in that string.
|
||||
var pos = 0;
|
||||
// Accumulator for strings that have been iterated over but not
|
||||
// get()-ed yet.
|
||||
var accum = "";
|
||||
// Make sure there are more characters ready, or throw
|
||||
// StopIteration.
|
||||
function ensureChars() {
|
||||
while (pos == current.length) {
|
||||
accum += current;
|
||||
current = ""; // In case source.next() throws
|
||||
pos = 0;
|
||||
try {current = source.next();}
|
||||
catch (e) {
|
||||
if (e != StopIteration) throw e;
|
||||
else return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
return {
|
||||
// Return the next character in the stream.
|
||||
peek: function() {
|
||||
if (!ensureChars()) return null;
|
||||
return current.charAt(pos);
|
||||
},
|
||||
// Get the next character, throw StopIteration if at end, check
|
||||
// for unused content.
|
||||
next: function() {
|
||||
if (!ensureChars()) {
|
||||
if (accum.length > 0)
|
||||
throw "End of stringstream reached without emptying buffer ('" + accum + "').";
|
||||
else
|
||||
throw StopIteration;
|
||||
}
|
||||
return current.charAt(pos++);
|
||||
},
|
||||
// Return the characters iterated over since the last call to
|
||||
// .get().
|
||||
get: function() {
|
||||
var temp = accum;
|
||||
accum = "";
|
||||
if (pos > 0){
|
||||
temp += current.slice(0, pos);
|
||||
current = current.slice(pos);
|
||||
pos = 0;
|
||||
}
|
||||
return temp;
|
||||
},
|
||||
// Push a string back into the stream.
|
||||
push: function(str) {
|
||||
current = current.slice(0, pos) + str + current.slice(pos);
|
||||
},
|
||||
lookAhead: function(str, consume, skipSpaces, caseInsensitive) {
|
||||
function cased(str) {return caseInsensitive ? str.toLowerCase() : str;}
|
||||
str = cased(str);
|
||||
var found = false;
|
||||
|
||||
var _accum = accum, _pos = pos;
|
||||
if (skipSpaces) this.nextWhileMatches(/[\s\u00a0]/);
|
||||
|
||||
while (true) {
|
||||
var end = pos + str.length, left = current.length - pos;
|
||||
if (end <= current.length) {
|
||||
found = str == cased(current.slice(pos, end));
|
||||
pos = end;
|
||||
break;
|
||||
}
|
||||
else if (str.slice(0, left) == cased(current.slice(pos))) {
|
||||
accum += current; current = "";
|
||||
try {current = source.next();}
|
||||
catch (e) {break;}
|
||||
pos = 0;
|
||||
str = str.slice(left);
|
||||
}
|
||||
else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!(found && consume)) {
|
||||
current = accum.slice(_accum.length) + current;
|
||||
pos = _pos;
|
||||
accum = _accum;
|
||||
}
|
||||
|
||||
return found;
|
||||
},
|
||||
|
||||
// Utils built on top of the above
|
||||
more: function() {
|
||||
return this.peek() !== null;
|
||||
},
|
||||
applies: function(test) {
|
||||
var next = this.peek();
|
||||
return (next !== null && test(next));
|
||||
},
|
||||
nextWhile: function(test) {
|
||||
var next;
|
||||
while ((next = this.peek()) !== null && test(next))
|
||||
this.next();
|
||||
},
|
||||
matches: function(re) {
|
||||
var next = this.peek();
|
||||
return (next !== null && re.test(next));
|
||||
},
|
||||
nextWhileMatches: function(re) {
|
||||
var next;
|
||||
while ((next = this.peek()) !== null && re.test(next))
|
||||
this.next();
|
||||
},
|
||||
equals: function(ch) {
|
||||
return ch === this.peek();
|
||||
},
|
||||
endOfLine: function() {
|
||||
var next = this.peek();
|
||||
return next == null || next == "\n";
|
||||
}
|
||||
};
|
||||
};
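// Usage sketch (illustrative only): wrap a one-shot iterator --
// anything with a next() method that throws StopIteration when
// exhausted -- and consume a word from it.
//   var iter = {i: 0, parts: ["var x = 1;"],
//               next: function() {
//                 if (this.i == this.parts.length) throw StopIteration;
//                 return this.parts[this.i++];
//               }};
//   var stream = stringStream(iter);
//   stream.nextWhileMatches(/\w/);
//   stream.get(); // => "var"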
@ -0,0 +1,57 @@
// A framework for simple tokenizers. Takes care of newlines and
|
||||
// white-space, and of getting the text from the source stream into
|
||||
// the token object. A state is a function of two arguments -- a
|
||||
// string stream and a setState function. The second can be used to
|
||||
// change the tokenizer's state, and can be ignored for stateless
|
||||
// tokenizers. This function should advance the stream over a token
|
||||
// and return a string or object containing information about the next
|
||||
// token, or null to pass and have the (new) state be called to finish
|
||||
// the token. When a string is given, it is wrapped in a {style, type}
|
||||
// object. In the resulting object, the characters consumed are stored
|
||||
// under the content property. Any whitespace following them is also
|
||||
// automatically consumed, and added to the value property. (Thus,
|
||||
// content is the actual meaningful part of the token, while value
|
||||
// contains all the text it spans.)
|
||||
|
||||
function tokenizer(source, state) {
|
||||
// Newlines are always a separate token.
|
||||
function isWhiteSpace(ch) {
|
||||
// The messy regexp is because IE's regexp matcher is of the
|
||||
// opinion that non-breaking spaces are not whitespace.
|
||||
return ch != "\n" && /^[\s\u00a0]*$/.test(ch);
|
||||
}
|
||||
|
||||
var tokenizer = {
|
||||
state: state,
|
||||
|
||||
take: function(type) {
|
||||
if (typeof(type) == "string")
|
||||
type = {style: type, type: type};
|
||||
|
||||
type.content = (type.content || "") + source.get();
|
||||
if (!/\n$/.test(type.content))
|
||||
source.nextWhile(isWhiteSpace);
|
||||
type.value = type.content + source.get();
|
||||
return type;
|
||||
},
|
||||
|
||||
next: function () {
|
||||
if (!source.more()) throw StopIteration;
|
||||
|
||||
var type;
|
||||
if (source.equals("\n")) {
|
||||
source.next();
|
||||
return this.take("whitespace");
|
||||
}
|
||||
|
||||
if (source.applies(isWhiteSpace))
|
||||
type = "whitespace";
|
||||
else
|
||||
while (!type)
|
||||
type = this.state(source, function(s) {tokenizer.state = s;});
|
||||
|
||||
return this.take(type);
|
||||
}
|
||||
};
|
||||
return tokenizer;
|
||||
}
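// Illustrative sketch (not in the original source): a minimal
// stateless state function. A state must advance the stream over one
// token and return a style string (or a {style, type} object);
// "plain" is just a made-up style name here.
//   function plainState(source, setState) {
//     source.nextWhileMatches(/[^\s\u00a0]/);
//     return "plain";
//   }
//   var tok = tokenizer(stringStream(iter), plainState);
//   tok.next(); // => {style: "plain", type: "plain", content: ..., value: ...}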
@ -0,0 +1,175 @@
/* Tokenizer for JavaScript code */
|
||||
|
||||
var tokenizeJavaScript = (function() {
|
||||
// Advance the stream until the given character (not preceded by a
|
||||
// backslash) is encountered, or the end of the line is reached.
|
||||
function nextUntilUnescaped(source, end) {
|
||||
var escaped = false;
|
||||
while (!source.endOfLine()) {
|
||||
var next = source.next();
|
||||
if (next == end && !escaped)
|
||||
return false;
|
||||
escaped = !escaped && next == "\\";
|
||||
}
|
||||
return escaped;
|
||||
}
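// For example, on a line holding the unterminated string "abc\ (it
// ends in a backslash), nextUntilUnescaped(source, '"') returns true,
// which readString below uses to stay 'inside' the string on the
// following line.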
|
||||
|
||||
// A map of JavaScript's keywords. The a/b/c keyword distinction is
|
||||
// very rough, but it gives the parser enough information to parse
|
||||
// correct code correctly (we don't care that much how we parse
|
||||
// incorrect code). The style information included in these objects
|
||||
// is used by the highlighter to pick the correct CSS style for a
|
||||
// token.
|
||||
var keywords = function(){
|
||||
function result(type, style){
|
||||
return {type: type, style: "js-" + style};
|
||||
}
|
||||
// keywords that take a parenthesized expression, and then a
|
||||
// statement (if)
|
||||
var keywordA = result("keyword a", "keyword");
|
||||
// keywords that take just a statement (else)
|
||||
var keywordB = result("keyword b", "keyword");
|
||||
// keywords that optionally take an expression, and form a
|
||||
// statement (return)
|
||||
var keywordC = result("keyword c", "keyword");
|
||||
var operator = result("operator", "keyword");
|
||||
var atom = result("atom", "atom");
|
||||
return {
|
||||
"if": keywordA, "while": keywordA, "with": keywordA,
|
||||
"else": keywordB, "do": keywordB, "try": keywordB, "finally": keywordB,
|
||||
"return": keywordC, "break": keywordC, "continue": keywordC, "new": keywordC, "delete": keywordC, "throw": keywordC,
|
||||
"in": operator, "typeof": operator, "instanceof": operator,
|
||||
"var": result("var", "keyword"), "function": result("function", "keyword"), "catch": result("catch", "keyword"),
|
||||
"for": result("for", "keyword"), "switch": result("switch", "keyword"),
|
||||
"case": result("case", "keyword"), "default": result("default", "keyword"),
|
||||
"true": atom, "false": atom, "null": atom, "undefined": atom, "NaN": atom, "Infinity": atom
|
||||
};
|
||||
}();
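// e.g. keywords["if"]   => {type: "keyword a", style: "js-keyword"}
//      keywords["true"] => {type: "atom", style: "js-atom"}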
|
||||
|
||||
// Some helper regexps
|
||||
var isOperatorChar = /[+\-*&%=<>!?|]/;
|
||||
var isHexDigit = /[0-9A-Fa-f]/;
|
||||
var isWordChar = /[\w\$_]/;
|
||||
|
||||
// Wrapper around jsToken that helps maintain parser state (whether
|
||||
// we are inside of a multi-line comment and whether the next token
|
||||
// could be a regular expression).
|
||||
function jsTokenState(inside, regexp) {
|
||||
return function(source, setState) {
|
||||
var newInside = inside;
|
||||
var type = jsToken(inside, regexp, source, function(c) {newInside = c;});
|
||||
var newRegexp = type.type == "operator" || type.type == "keyword c" || type.type.match(/^[\[{}\(,;:]$/);
|
||||
if (newRegexp != regexp || newInside != inside)
|
||||
setState(jsTokenState(newInside, newRegexp));
|
||||
return type;
|
||||
};
|
||||
}
|
||||
|
||||
// The token reader, intended to be used by the tokenizer from
|
||||
// tokenize.js (through jsTokenState). Advances the source stream
|
||||
// over a token, and returns an object containing the type and style
|
||||
// of that token.
|
||||
function jsToken(inside, regexp, source, setInside) {
|
||||
function readHexNumber(){
|
||||
source.next(); // skip the 'x'
|
||||
source.nextWhileMatches(isHexDigit);
|
||||
return {type: "number", style: "js-atom"};
|
||||
}
|
||||
|
||||
function readNumber() {
|
||||
source.nextWhileMatches(/[0-9]/);
|
||||
if (source.equals(".")){
|
||||
source.next();
|
||||
source.nextWhileMatches(/[0-9]/);
|
||||
}
|
||||
if (source.equals("e") || source.equals("E")){
|
||||
source.next();
|
||||
if (source.equals("-"))
|
||||
source.next();
|
||||
source.nextWhileMatches(/[0-9]/);
|
||||
}
|
||||
return {type: "number", style: "js-atom"};
|
||||
}
|
||||
// Read a word, look it up in keywords. If not found, it is a
|
||||
// variable, otherwise it is a keyword of the type found.
|
||||
function readWord() {
|
||||
source.nextWhileMatches(isWordChar);
|
||||
var word = source.get();
|
||||
var known = keywords.hasOwnProperty(word) && keywords.propertyIsEnumerable(word) && keywords[word];
|
||||
return known ? {type: known.type, style: known.style, content: word} :
|
||||
{type: "variable", style: "js-variable", content: word};
|
||||
}
|
||||
function readRegexp() {
|
||||
nextUntilUnescaped(source, "/");
|
||||
source.nextWhileMatches(/[gi]/);
|
||||
return {type: "regexp", style: "js-string"};
|
||||
}
|
||||
// Multi-line comments are tricky. We want to return the newlines
|
||||
// embedded in them as regular newline tokens, and then continue
|
||||
// returning a comment token for every line of the comment. So
|
||||
// some state has to be saved (inside) to indicate whether we are
|
||||
// inside a /* */ sequence.
|
||||
function readMultilineComment(start){
|
||||
var newInside = "/*";
|
||||
var maybeEnd = (start == "*");
|
||||
while (true) {
|
||||
if (source.endOfLine())
|
||||
break;
|
||||
var next = source.next();
|
||||
if (next == "/" && maybeEnd){
|
||||
newInside = null;
|
||||
break;
|
||||
}
|
||||
maybeEnd = (next == "*");
|
||||
}
|
||||
setInside(newInside);
|
||||
return {type: "comment", style: "js-comment"};
|
||||
}
|
||||
function readOperator() {
|
||||
source.nextWhileMatches(isOperatorChar);
|
||||
return {type: "operator", style: "js-operator"};
|
||||
}
|
||||
function readString(quote) {
|
||||
var endBackSlash = nextUntilUnescaped(source, quote);
|
||||
setInside(endBackSlash ? quote : null);
|
||||
return {type: "string", style: "js-string"};
|
||||
}
|
||||
|
||||
// Fetch the next token. Dispatches on first character in the
|
||||
// stream, or first two characters when the first is a slash.
|
||||
if (inside == "\"" || inside == "'")
|
||||
return readString(inside);
|
||||
var ch = source.next();
|
||||
if (inside == "/*")
|
||||
return readMultilineComment(ch);
|
||||
else if (ch == "\"" || ch == "'")
|
||||
return readString(ch);
|
||||
// with punctuation, the type of the token is the symbol itself
|
||||
else if (/[\[\]{}\(\),;\:\.]/.test(ch))
|
||||
return {type: ch, style: "js-punctuation"};
|
||||
else if (ch == "0" && (source.equals("x") || source.equals("X")))
|
||||
return readHexNumber();
|
||||
else if (/[0-9]/.test(ch))
|
||||
return readNumber();
|
||||
else if (ch == "/"){
|
||||
if (source.equals("*"))
|
||||
{ source.next(); return readMultilineComment(ch); }
|
||||
else if (source.equals("/"))
|
||||
{ nextUntilUnescaped(source, null); return {type: "comment", style: "js-comment"};}
|
||||
else if (regexp)
|
||||
return readRegexp();
|
||||
else
|
||||
return readOperator();
|
||||
}
|
||||
else if (isOperatorChar.test(ch))
|
||||
return readOperator();
|
||||
else
|
||||
return readWord();
|
||||
}
|
||||
|
||||
// The external interface to the tokenizer.
|
||||
return function(source, startState) {
|
||||
return tokenizer(source, startState || jsTokenState(false, true));
|
||||
};
|
||||
})();
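// Usage sketch (illustrative): the returned object is a tokenizer
// whose next() yields token objects until it throws StopIteration,
// so it can be fed straight to forEach from util.js.
//   var tokens = tokenizeJavaScript(stringStream(iter));
//   forEach(tokens, function(token) {
//     // token.style is e.g. "js-keyword", token.content the raw text
//   });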
@ -0,0 +1,410 @@
/**
|
||||
* Storage and control for undo information within a CodeMirror
|
||||
* editor. 'Why on earth is such a complicated mess required for
|
||||
* that?', I hear you ask. The goal, in implementing this, was to make
|
||||
* the complexity of storing and reverting undo information depend
|
||||
* only on the size of the edited or restored content, not on the size
|
||||
* of the whole document. This makes it necessary to use a kind of
|
||||
* 'diff' system, which, when applied to a DOM tree, causes some
|
||||
* complexity and hackery.
|
||||
*
|
||||
* In short, the editor 'touches' BR elements as it parses them, and
|
||||
* the History stores these. When nothing is touched in commitDelay
|
||||
* milliseconds, the changes are committed: It goes over all touched
|
||||
* nodes, throws out the ones that did not change since last commit or
|
||||
* are no longer in the document, and assembles the rest into zero or
|
||||
* more 'chains' -- arrays of adjacent lines. Links back to these
|
||||
* chains are added to the BR nodes, while the chain that previously
|
||||
* spanned these nodes is added to the undo history. Undoing a change
|
||||
* means taking such a chain off the undo history, restoring its
|
||||
* content (text is saved per line) and linking it back into the
|
||||
* document.
|
||||
*/
|
||||
|
||||
// A history object needs to know about the DOM container holding the
|
||||
// document, the maximum amount of undo levels it should store, the
|
||||
// delay (of no input) after which it commits a set of changes, and,
|
||||
// unfortunately, the 'parent' window -- a window that is not in
|
||||
// designMode, and on which setTimeout works in every browser.
|
||||
function History(container, maxDepth, commitDelay, editor) {
|
||||
this.container = container;
|
||||
this.maxDepth = maxDepth; this.commitDelay = commitDelay;
|
||||
this.editor = editor; this.parent = editor.parent;
|
||||
// This line object represents the initial, empty editor.
|
||||
var initial = {text: "", from: null, to: null};
|
||||
// As the borders between lines are represented by BR elements, the
|
||||
// start of the first line and the end of the last one are
|
||||
// represented by null. Since you can not store any properties
|
||||
// (links to line objects) in null, these properties are used in
|
||||
// those cases.
|
||||
this.first = initial; this.last = initial;
|
||||
// Similarly, a 'historyTouched' property is added to the BR in
|
||||
// front of lines that have already been touched, and 'firstTouched'
|
||||
// is used for the first line.
|
||||
this.firstTouched = false;
|
||||
// History is the set of committed changes, touched is the set of
|
||||
// nodes touched since the last commit.
|
||||
this.history = []; this.redoHistory = []; this.touched = [];
|
||||
}
|
||||
|
||||
History.prototype = {
|
||||
// Schedule a commit (if no other touches come in for commitDelay
|
||||
// milliseconds).
|
||||
scheduleCommit: function() {
|
||||
var self = this;
|
||||
this.parent.clearTimeout(this.commitTimeout);
|
||||
this.commitTimeout = this.parent.setTimeout(function(){self.tryCommit();}, this.commitDelay);
|
||||
},
|
||||
|
||||
// Mark a node as touched. Null is a valid argument.
|
||||
touch: function(node) {
|
||||
this.setTouched(node);
|
||||
this.scheduleCommit();
|
||||
},
|
||||
|
||||
// Undo the last change.
|
||||
undo: function() {
|
||||
// Make sure pending changes have been committed.
|
||||
this.commit();
|
||||
|
||||
if (this.history.length) {
|
||||
// Take the top diff from the history, apply it, and store its
|
||||
// shadow in the redo history.
|
||||
var item = this.history.pop();
|
||||
this.redoHistory.push(this.updateTo(item, "applyChain"));
|
||||
this.notifyEnvironment();
|
||||
return this.chainNode(item);
|
||||
}
|
||||
},
|
||||
|
||||
// Redo the last undone change.
|
||||
redo: function() {
|
||||
this.commit();
|
||||
if (this.redoHistory.length) {
|
||||
// The inverse of undo, basically.
|
||||
var item = this.redoHistory.pop();
|
||||
this.addUndoLevel(this.updateTo(item, "applyChain"));
|
||||
this.notifyEnvironment();
|
||||
return this.chainNode(item);
|
||||
}
|
||||
},
|
||||
|
||||
clear: function() {
|
||||
this.history = [];
|
||||
this.redoHistory = [];
|
||||
},
|
||||
|
||||
// Ask for the size of the un/redo histories.
|
||||
historySize: function() {
|
||||
return {undo: this.history.length, redo: this.redoHistory.length};
|
||||
},
|
||||
|
||||
// Push a changeset into the document.
|
||||
push: function(from, to, lines) {
|
||||
var chain = [];
|
||||
for (var i = 0; i < lines.length; i++) {
|
||||
var end = (i == lines.length - 1) ? to : this.container.ownerDocument.createElement("BR");
|
||||
chain.push({from: from, to: end, text: cleanText(lines[i])});
|
||||
from = end;
|
||||
}
|
||||
this.pushChains([chain], from == null && to == null);
|
||||
this.notifyEnvironment();
|
||||
},
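// For example, replacing a region with two lines of text looks
// roughly like this (sketch; brBefore/brAfter stand for the
// surrounding BR nodes, or null at the document edges):
//   history.push(brBefore, brAfter, ["first line", "second line"]);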
|
||||
|
||||
pushChains: function(chains, doNotHighlight) {
|
||||
this.commit(doNotHighlight);
|
||||
this.addUndoLevel(this.updateTo(chains, "applyChain"));
|
||||
this.redoHistory = [];
|
||||
},
|
||||
|
||||
// Retrieve a DOM node from a chain (for scrolling to it after undo/redo).
|
||||
chainNode: function(chains) {
|
||||
for (var i = 0; i < chains.length; i++) {
|
||||
var start = chains[i][0], node = start && (start.from || start.to);
|
||||
if (node) return node;
|
||||
}
|
||||
},
|
||||
|
||||
// Clear the undo history, make the current document the start
|
||||
// position.
|
||||
reset: function() {
|
||||
this.history = []; this.redoHistory = [];
|
||||
},
|
||||
|
||||
textAfter: function(br) {
|
||||
return this.after(br).text;
|
||||
},
|
||||
|
||||
nodeAfter: function(br) {
|
||||
return this.after(br).to;
|
||||
},
|
||||
|
||||
nodeBefore: function(br) {
|
||||
return this.before(br).from;
|
||||
},
|
||||
|
||||
// Commit unless there are pending dirty nodes.
|
||||
tryCommit: function() {
|
||||
if (!window.History) return; // Stop when frame has been unloaded
|
||||
if (this.editor.highlightDirty()) this.commit(true);
|
||||
else this.scheduleCommit();
|
||||
},
|
||||
|
||||
// Check whether the touched nodes hold any changes, if so, commit
|
||||
// them.
|
||||
commit: function(doNotHighlight) {
|
||||
this.parent.clearTimeout(this.commitTimeout);
|
||||
// Make sure there are no pending dirty nodes.
|
||||
if (!doNotHighlight) this.editor.highlightDirty(true);
|
||||
// Build set of chains.
|
||||
var chains = this.touchedChains(), self = this;
|
||||
|
||||
if (chains.length) {
|
||||
this.addUndoLevel(this.updateTo(chains, "linkChain"));
|
||||
this.redoHistory = [];
|
||||
this.notifyEnvironment();
|
||||
}
|
||||
},
|
||||
|
||||
// [ end of public interface ]
|
||||
|
||||
// Update the document with a given set of chains, return its
|
||||
// shadow. updateFunc should be "applyChain" or "linkChain". In the
|
||||
// second case, the chains are taken to correspond to the current
// document, and only the state of the line data is updated. In the
// first case, the content of the chains is also pushed into the
|
||||
// document.
|
||||
updateTo: function(chains, updateFunc) {
|
||||
var shadows = [], dirty = [];
|
||||
for (var i = 0; i < chains.length; i++) {
|
||||
shadows.push(this.shadowChain(chains[i]));
|
||||
dirty.push(this[updateFunc](chains[i]));
|
||||
}
|
||||
if (updateFunc == "applyChain")
|
||||
this.notifyDirty(dirty);
|
||||
return shadows;
|
||||
},
|
||||
|
||||
// Notify the editor that some nodes have changed.
|
||||
notifyDirty: function(nodes) {
|
||||
forEach(nodes, method(this.editor, "addDirtyNode"))
|
||||
this.editor.scheduleHighlight();
|
||||
},
|
||||
|
||||
notifyEnvironment: function() {
|
||||
// Used by the line-wrapping line-numbering code.
|
||||
if (window.frameElement && window.frameElement.CodeMirror.updateNumbers)
|
||||
window.frameElement.CodeMirror.updateNumbers();
|
||||
if (this.onChange) this.onChange();
|
||||
},
|
||||
|
||||
// Link a chain into the DOM nodes (or the first/last links for null
|
||||
// nodes).
|
||||
linkChain: function(chain) {
|
||||
for (var i = 0; i < chain.length; i++) {
|
||||
var line = chain[i];
|
||||
if (line.from) line.from.historyAfter = line;
|
||||
else this.first = line;
|
||||
if (line.to) line.to.historyBefore = line;
|
||||
else this.last = line;
|
||||
}
|
||||
},
|
||||
|
||||
// Get the line object after/before a given node.
|
||||
after: function(node) {
|
||||
return node ? node.historyAfter : this.first;
|
||||
},
|
||||
before: function(node) {
|
||||
return node ? node.historyBefore : this.last;
|
||||
},
|
||||
|
||||
// Mark a node as touched if it has not already been marked.
|
||||
setTouched: function(node) {
|
||||
if (node) {
|
||||
if (!node.historyTouched) {
|
||||
this.touched.push(node);
|
||||
node.historyTouched = true;
|
||||
}
|
||||
}
|
||||
else {
|
||||
this.firstTouched = true;
|
||||
}
|
||||
},
|
||||
|
||||
// Store a new set of undo info, throw away info if there is more of
|
||||
// it than allowed.
|
||||
addUndoLevel: function(diffs) {
|
||||
this.history.push(diffs);
|
||||
if (this.history.length > this.maxDepth)
|
||||
this.history.shift();
|
||||
},
|
||||
|
||||
// Build chains from a set of touched nodes.
|
||||
touchedChains: function() {
|
||||
var self = this;
|
||||
|
||||
// The temp system is a crummy hack to speed up determining
|
||||
// whether a (currently touched) node has a line object associated
|
||||
// with it. nullTemp is used to store the object for the first
|
||||
// line, other nodes get it stored in their historyTemp property.
|
||||
var nullTemp = null;
|
||||
function temp(node) {return node ? node.historyTemp : nullTemp;}
|
||||
function setTemp(node, line) {
|
||||
if (node) node.historyTemp = line;
|
||||
else nullTemp = line;
|
||||
}
|
||||
|
||||
function buildLine(node) {
|
||||
var text = [];
|
||||
for (var cur = node ? node.nextSibling : self.container.firstChild;
|
||||
cur && !isBR(cur); cur = cur.nextSibling)
|
||||
if (cur.currentText) text.push(cur.currentText);
|
||||
return {from: node, to: cur, text: cleanText(text.join(""))};
|
||||
}
|
||||
|
||||
// Filter out unchanged lines and nodes that are no longer in the
|
||||
// document. Build up line objects for remaining nodes.
|
||||
var lines = [];
|
||||
if (self.firstTouched) self.touched.push(null);
|
||||
forEach(self.touched, function(node) {
|
||||
if (node && node.parentNode != self.container) return;
|
||||
|
||||
if (node) node.historyTouched = false;
|
||||
else self.firstTouched = false;
|
||||
|
||||
var line = buildLine(node), shadow = self.after(node);
|
||||
if (!shadow || shadow.text != line.text || shadow.to != line.to) {
|
||||
lines.push(line);
|
||||
setTemp(node, line);
|
||||
}
|
||||
});
|
||||
|
||||
// Get the BR element after/before the given node.
|
||||
function nextBR(node, dir) {
|
||||
var link = dir + "Sibling", search = node[link];
|
||||
while (search && !isBR(search))
|
||||
search = search[link];
|
||||
return search;
|
||||
}
|
||||
|
||||
// Assemble line objects into chains by scanning the DOM tree
|
||||
// around them.
|
||||
var chains = []; self.touched = [];
|
||||
forEach(lines, function(line) {
|
||||
// Note that this makes the loop skip line objects that have
|
||||
// been pulled into chains by lines before them.
|
||||
if (!temp(line.from)) return;
|
||||
|
||||
var chain = [], curNode = line.from, safe = true;
|
||||
// Put any line objects (referred to by temp info) before this
|
||||
// one on the front of the array.
|
||||
while (true) {
|
||||
var curLine = temp(curNode);
|
||||
if (!curLine) {
|
||||
if (safe) break;
|
||||
else curLine = buildLine(curNode);
|
||||
}
|
||||
chain.unshift(curLine);
|
||||
setTemp(curNode, null);
|
||||
if (!curNode) break;
|
||||
safe = self.after(curNode);
|
||||
curNode = nextBR(curNode, "previous");
|
||||
}
|
||||
curNode = line.to; safe = self.before(line.from);
|
||||
// Add lines after this one at end of array.
|
||||
while (true) {
|
||||
if (!curNode) break;
|
||||
var curLine = temp(curNode);
|
||||
if (!curLine) {
|
||||
if (safe) break;
|
||||
else curLine = buildLine(curNode);
|
||||
}
|
||||
chain.push(curLine);
|
||||
setTemp(curNode, null);
|
||||
safe = self.before(curNode);
|
||||
curNode = nextBR(curNode, "next");
|
||||
}
|
||||
chains.push(chain);
|
||||
});
|
||||
|
||||
return chains;
|
||||
},
|
||||
|
||||
// Find the 'shadow' of a given chain by following the links in the
|
||||
// DOM nodes at its start and end.
|
||||
shadowChain: function(chain) {
|
||||
var shadows = [], next = this.after(chain[0].from), end = chain[chain.length - 1].to;
|
||||
while (true) {
|
||||
shadows.push(next);
|
||||
var nextNode = next.to;
|
||||
if (!nextNode || nextNode == end)
|
||||
break;
|
||||
else
|
||||
next = nextNode.historyAfter || this.before(end);
|
||||
// (The this.before(end) is a hack -- FF sometimes removes
|
||||
// properties from BR nodes, in which case the best we can hope
|
||||
// for is to not break.)
|
||||
}
|
||||
return shadows;
|
||||
},
|
||||
|
||||
// Update the DOM tree to contain the lines specified in a given
|
||||
// chain, link this chain into the DOM nodes.
|
||||
applyChain: function(chain) {
|
||||
// Some attempt is made to prevent the cursor from jumping
|
||||
// randomly when an undo or redo happens. It still behaves a bit
// strangely sometimes.
|
||||
var cursor = select.cursorPos(this.container, false), self = this;
|
||||
|
||||
// Remove all nodes in the DOM tree between from and to (null for
|
||||
// start/end of container).
|
||||
function removeRange(from, to) {
|
||||
var pos = from ? from.nextSibling : self.container.firstChild;
|
||||
while (pos != to) {
|
||||
var temp = pos.nextSibling;
|
||||
removeElement(pos);
|
||||
pos = temp;
|
||||
}
|
||||
}
|
||||
|
||||
var start = chain[0].from, end = chain[chain.length - 1].to;
|
||||
// Clear the space where this change has to be made.
|
||||
removeRange(start, end);
|
||||
|
||||
// Insert the content specified by the chain into the DOM tree.
|
||||
for (var i = 0; i < chain.length; i++) {
|
||||
var line = chain[i];
|
||||
// The start and end of the space are already correct, but BR
|
||||
// tags inside it have to be put back.
|
||||
if (i > 0)
|
||||
self.container.insertBefore(line.from, end);
|
||||
|
||||
// Add the text.
|
||||
var node = makePartSpan(fixSpaces(line.text), this.container.ownerDocument);
|
||||
self.container.insertBefore(node, end);
|
||||
// See if the cursor was on this line. Put it back, adjusting
|
||||
// for changed line length, if it was.
|
||||
if (cursor && cursor.node == line.from) {
|
||||
var cursordiff = 0;
|
||||
var prev = this.after(line.from);
|
||||
if (prev && i == chain.length - 1) {
|
||||
// Only adjust if the cursor is after the unchanged part of
|
||||
// the line.
|
||||
for (var match = 0; match < cursor.offset &&
|
||||
line.text.charAt(match) == prev.text.charAt(match); match++);
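// (The loop above has an empty body; it only advances 'match'
// to the first character where the old and new line differ.)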
|
||||
if (cursor.offset > match)
|
||||
cursordiff = line.text.length - prev.text.length;
|
||||
}
|
||||
select.setCursorPos(this.container, {node: line.from, offset: Math.max(0, cursor.offset + cursordiff)});
|
||||
}
|
||||
// Cursor was in removed line, this is last new line.
|
||||
else if (cursor && (i == chain.length - 1) && cursor.node && cursor.node.parentNode != this.container) {
|
||||
select.setCursorPos(this.container, {node: line.from, offset: line.text.length});
|
||||
}
|
||||
}
|
||||
|
||||
// Anchor the chain in the DOM tree.
|
||||
this.linkChain(chain);
|
||||
return start;
|
||||
}
|
||||
};
@ -0,0 +1,130 @@
/* A few useful utility functions. */
|
||||
|
||||
// Capture a method on an object.
|
||||
function method(obj, name) {
|
||||
return function() {obj[name].apply(obj, arguments);};
|
||||
}
|
||||
|
||||
// The value used to signal the end of a sequence in iterators.
|
||||
var StopIteration = {toString: function() {return "StopIteration"}};
|
||||
|
||||
// Apply a function to each element in a sequence.
|
||||
function forEach(iter, f) {
|
||||
if (iter.next) {
|
||||
try {while (true) f(iter.next());}
|
||||
catch (e) {if (e != StopIteration) throw e;}
|
||||
}
|
||||
else {
|
||||
for (var i = 0; i < iter.length; i++)
|
||||
f(iter[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// Map a function over a sequence, producing an array of results.
|
||||
function map(iter, f) {
|
||||
var accum = [];
|
||||
forEach(iter, function(val) {accum.push(f(val));});
|
||||
return accum;
|
||||
}
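// Illustrative: both helpers accept plain arrays as well as
// iterators that end by throwing StopIteration.
//   map([1, 2, 3], function(x) {return 2 * x;}); // => [2, 4, 6]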
|
||||
|
||||
// Create a predicate function that tests a string against a given
|
||||
// regular expression. No longer used but might be used by 3rd party
|
||||
// parsers.
|
||||
function matcher(regexp){
|
||||
return function(value){return regexp.test(value);};
|
||||
}
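// e.g. matcher(/^\d+$/)("123") => true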
|
||||
|
||||
// Test whether a DOM node has a certain CSS class. Much faster than
|
||||
// the MochiKit equivalent, for some reason.
|
||||
function hasClass(element, className){
|
||||
var classes = element.className;
|
||||
return classes && new RegExp("(^| )" + className + "($| )").test(classes);
|
||||
}
|
||||
|
||||
// Insert a DOM node after another node.
|
||||
function insertAfter(newNode, oldNode) {
|
||||
var parent = oldNode.parentNode;
|
||||
parent.insertBefore(newNode, oldNode.nextSibling);
|
||||
return newNode;
|
||||
}
|
||||
|
||||
function removeElement(node) {
|
||||
if (node.parentNode)
|
||||
node.parentNode.removeChild(node);
|
||||
}
|
||||
|
||||
function clearElement(node) {
|
||||
while (node.firstChild)
|
||||
node.removeChild(node.firstChild);
|
||||
}
|
||||
|
||||
// Check whether a node is contained in another one.
|
||||
function isAncestor(node, child) {
|
||||
while (child = child.parentNode) {
|
||||
if (node == child)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// The non-breaking space character.
|
||||
var nbsp = "\u00a0";
|
||||
var matching = {"{": "}", "[": "]", "(": ")",
|
||||
"}": "{", "]": "[", ")": "("};
|
||||
|
||||
// Standardize a few unportable event properties.
|
||||
function normalizeEvent(event) {
|
||||
if (!event.stopPropagation) {
|
||||
event.stopPropagation = function() {this.cancelBubble = true;};
|
||||
event.preventDefault = function() {this.returnValue = false;};
|
||||
}
|
||||
if (!event.stop) {
|
||||
event.stop = function() {
|
||||
this.stopPropagation();
|
||||
this.preventDefault();
|
||||
};
|
||||
}
|
||||
|
||||
if (event.type == "keypress") {
|
||||
event.code = (event.charCode == null) ? event.keyCode : event.charCode;
|
||||
event.character = String.fromCharCode(event.code);
|
||||
}
|
||||
return event;
|
||||
}
|
||||
|
||||
// Portably register event handlers.
|
||||
function addEventHandler(node, type, handler, removeFunc) {
|
||||
function wrapHandler(event) {
|
||||
handler(normalizeEvent(event || window.event));
|
||||
}
|
||||
if (typeof node.addEventListener == "function") {
|
||||
node.addEventListener(type, wrapHandler, false);
|
||||
if (removeFunc) return function() {node.removeEventListener(type, wrapHandler, false);};
|
||||
}
|
||||
else {
|
||||
node.attachEvent("on" + type, wrapHandler);
|
||||
if (removeFunc) return function() {node.detachEvent("on" + type, wrapHandler);};
|
||||
}
|
||||
}
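// Usage sketch (illustrative): register a keypress handler and, via
// the removeFunc flag, keep a function that detaches it again.
//   var detach = addEventHandler(node, "keypress", function(event) {
//     if (event.character == "q") event.stop();
//   }, true);
//   // ...later: detach();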
|
||||
|
||||
function nodeText(node) {
|
||||
return node.textContent || node.innerText || node.nodeValue || "";
|
||||
}
|
||||
|
||||
function nodeTop(node) {
|
||||
var top = 0;
|
||||
while (node.offsetParent) {
|
||||
top += node.offsetTop;
|
||||
node = node.offsetParent;
|
||||
}
|
||||
return top;
|
||||
}
|
||||
|
||||
function isBR(node) {
|
||||
var nn = node.nodeName;
|
||||
return nn == "BR" || nn == "br";
|
||||
}
|
||||
function isSpan(node) {
|
||||
var nn = node.nodeName;
|
||||
return nn == "SPAN" || nn == "span";
|
||||
}
@ -0,0 +1,51 @@
.editbox {
margin: .4em;
padding: 0;
font-family: monospace;
font-size: 10pt;
color: black;
}

.editbox p {
margin: 0;
}

span.xml-tagname {
color: #A0B;
}

span.xml-attribute {
color: #281;
}

span.xml-punctuation {
color: black;
}

span.xml-attname {
color: #00F;
}

span.xml-comment {
color: #A70;
}

span.xml-cdata {
color: #48A;
}

span.xml-processing {
color: #999;
}

span.xml-entity {
color: #A22;
}

span.xml-error {
color: #F00;
}

span.xml-text {
color: black;
}
@ -0,0 +1,74 @@
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<wps:Capabilities service="WPS" version="1.0.0" xml:lang="en-US" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsGetCapabilities_response.xsd" updateSequence="1">
|
||||
<ows:ServiceIdentification>
|
||||
<ows:Title>52°North WPS ${version}</ows:Title>
|
||||
<ows:Abstract>Service based on the 52°North implementation of WPS 1.0.0</ows:Abstract>
|
||||
<ows:Keywords>
|
||||
<ows:Keyword>WPS</ows:Keyword>
|
||||
<ows:Keyword>geospatial</ows:Keyword>
|
||||
<ows:Keyword>geoprocessing</ows:Keyword>
|
||||
</ows:Keywords>
|
||||
<ows:ServiceType>WPS</ows:ServiceType>
|
||||
<ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>
|
||||
<ows:Fees>NONE</ows:Fees>
|
||||
<ows:AccessConstraints>NONE</ows:AccessConstraints>
|
||||
</ows:ServiceIdentification>
|
||||
<ows:ServiceProvider>
|
||||
<ows:ProviderName>52North</ows:ProviderName>
|
||||
<ows:ProviderSite xlink:href="http://www.52north.org/"/>
|
||||
<ows:ServiceContact>
|
||||
<ows:IndividualName>Your name</ows:IndividualName>
|
||||
<ows:PositionName>Your position</ows:PositionName>
|
||||
<ows:ContactInfo>
|
||||
<ows:Phone>
|
||||
<ows:Voice></ows:Voice>
|
||||
<ows:Facsimile></ows:Facsimile>
|
||||
</ows:Phone>
|
||||
<ows:Address>
|
||||
<ows:DeliveryPoint></ows:DeliveryPoint>
|
||||
<ows:City></ows:City>
|
||||
<ows:AdministrativeArea></ows:AdministrativeArea>
|
||||
<ows:PostalCode></ows:PostalCode>
|
||||
<ows:Country></ows:Country>
|
||||
<ows:ElectronicMailAddress></ows:ElectronicMailAddress>
|
||||
</ows:Address>
|
||||
</ows:ContactInfo>
|
||||
</ows:ServiceContact>
|
||||
</ows:ServiceProvider>
|
||||
<ows:OperationsMetadata>
|
||||
<ows:Operation name="GetCapabilities">
|
||||
<ows:DCP>
|
||||
<ows:HTTP>
|
||||
<ows:Get xlink:href="http://dynamicallygeneratedURL/GeoPS?"/>
|
||||
<ows:Post xlink:href="http://dynamicallygeneratedURL/GeoPS"/>
|
||||
</ows:HTTP>
|
||||
</ows:DCP>
|
||||
</ows:Operation>
|
||||
<ows:Operation name="DescribeProcess">
|
||||
<ows:DCP>
|
||||
<ows:HTTP>
|
||||
<ows:Get xlink:href="http://dynamicallygeneratedURL/GeoPS/GeoPS?"/>
|
||||
<ows:Post xlink:href="http://dynamicallygeneratedURL/GeoPS/GeoPS"/>
|
||||
</ows:HTTP>
|
||||
</ows:DCP>
|
||||
</ows:Operation>
|
||||
<ows:Operation name="Execute">
|
||||
<ows:DCP>
|
||||
<ows:HTTP>
|
||||
<ows:Get xlink:href="http://dynamicallygeneratedURL/GeoPS/GeoPS?"/>
|
||||
<ows:Post xlink:href="http://dynamicallygeneratedURL/GeoPS/GeoPS"/>
|
||||
</ows:HTTP>
|
||||
</ows:DCP>
|
||||
</ows:Operation>
|
||||
</ows:OperationsMetadata>
|
||||
|
||||
<wps:Languages>
|
||||
<wps:Default>
|
||||
<ows:Language>en-US</ows:Language>
|
||||
</wps:Default>
|
||||
<wps:Supported>
|
||||
<ows:Language>en-US</ows:Language>
|
||||
</wps:Supported>
|
||||
</wps:Languages>
|
||||
|
||||
</wps:Capabilities>
@ -0,0 +1,217 @@
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<WPSConfiguration xmlns="http://n52.org/wps">
|
||||
<Datahandlers>
|
||||
<ParserList>
|
||||
<Parser name="WCPSQueryParser" className="org.n52.wps.io.datahandler.parser.WCPSQueryParser" active="true">
|
||||
<Format mimetype="text/plain" schema="http://schemas.opengis.net/wcps/1.0/wcpsAll.xsd"/>
|
||||
</Parser>
|
||||
<Parser name="WKTParser" className="org.n52.wps.io.datahandler.parser.WKTParser" active="true">
|
||||
<Format mimetype="application/wkt"/>
|
||||
</Parser>
|
||||
<Parser name="GenericXMLDataParser" className="org.n52.wps.io.datahandler.parser.GenericXMLDataParser" active="true">
|
||||
<Format mimetype="text/xml; subtype=gml/2.1.2" schema="http://schemas.opengis.net/gml/2.1.2/feature.xsd"/>
|
||||
<Format mimetype="text/xml"/>
|
||||
</Parser>
|
||||
<Parser name="GenericFileParser" className="org.n52.wps.io.datahandler.parser.GenericFileParser" active="true">
|
||||
<Format mimetype="text/xml"/>
|
||||
<Format mimetype="text/csv"/>
|
||||
<Format mimetype="text/plain"/>
|
||||
</Parser>
|
||||
<Parser name="D4ScienceFileParser" className="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.bindings.D4ScienceFileParser" active="true">
|
||||
<Format mimetype="text/xml"/>
|
||||
<Format mimetype="application/d4science"/>
|
||||
</Parser>
|
||||
<Parser name="GisLinkParser" className="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.bindings.GisLinkParser" active="true">
|
||||
<Format mimetype="application/geotiff"/>
|
||||
<Format mimetype="application/wcs"/>
|
||||
<Format mimetype="application/asc"/>
|
||||
<Format mimetype="text/plain"/>
|
||||
<Format mimetype="application/wfs"/>
|
||||
<Format mimetype="application/opendap"/>
|
||||
</Parser>
|
||||
</ParserList>
|
||||
<GeneratorList>
|
||||
<Generator name="WKTGenerator" className="org.n52.wps.io.datahandler.generator.WKTGenerator" active="true">
|
||||
<Format mimetype="application/wkt"/>
|
||||
</Generator>
|
||||
<Generator name="GenericXMLDataGenerator" className="org.n52.wps.io.datahandler.generator.GenericXMLDataGenerator" active="true">
|
||||
<Format mimetype="text/xml; subtype=gml/2.1.2" schema="http://schemas.opengis.net/gml/2.1.2/feature.xsd"/>
|
||||
</Generator>
|
||||
<Generator name="GenericFileGenerator" className="org.n52.wps.io.datahandler.generator.GenericFileGenerator" active="true">
|
||||
<Format mimetype="text/plain"/>
|
||||
</Generator>
|
||||
<Generator name="PngFileGenerator" className="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.bindings.PngFileGenerator" active="true">
|
||||
<Format mimetype="image/png"/>
|
||||
</Generator>
|
||||
<Generator name="GifFileGenerator" className="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.bindings.GifFileGenerator" active="true">
|
||||
<Format mimetype="image/gif"/>
|
||||
</Generator>
|
||||
<Generator name="D4ScienceFileGenerator" className="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.bindings.D4ScienceFileGenerator" active="true">
|
||||
<Format mimetype="application/d4science"/>
|
||||
</Generator>
|
||||
<Generator name="CsvFileGenerator" className="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.bindings.CsvFileGenerator" active="true">
|
||||
<Format mimetype="text/csv"/>
|
||||
</Generator>
|
||||
<Generator name="GisLinkGenerator" className="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.bindings.GisLinkGenerator" active="true">
|
||||
<Format mimetype="application/wms"/>
|
||||
</Generator>
|
||||
</GeneratorList>
|
||||
</Datahandlers>
|
||||
<AlgorithmRepositoryList>
|
||||
<Repository name="LocalAlgorithmRepository" className="org.n52.wps.server.LocalAlgorithmRepository" active="true">
|
||||
<Property name="Algorithm" active="true">org.n52.wps.demo.TestIO</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.HCAF_FILTER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.OCCURRENCES_SUBTRACTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.OCCURRENCES_MERGER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.OCCURRENCES_INTERSECTOR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ABSENCE_CELLS_FROM_AQUAMAPS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.BIOCLIMATE_HCAF</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.BIOCLIMATE_HSPEC</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.BIOCLIMATE_HSPEN</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.BIONYM_LOCAL</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OCCURRENCES_DUPLICATES_DELETER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OCCURRENCES_MARINE_TERRESTRIAL</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.HCAF_INTERPOLATION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.PRESENCE_CELLS_GENERATION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.MOST_OBSERVED_SPECIES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.MOST_OBSERVED_TAXA</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SPECIES_OBSERVATIONS_PER_AREA</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SPECIES_OBSERVATIONS_TREND_PER_YEAR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SPECIES_OBSERVATION_LME_AREA_PER_YEAR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SPECIES_OBSERVATION_MEOW_AREA_PER_YEAR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TAXONOMY_OBSERVATIONS_TREND_PER_YEAR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GETTABLEDETAILS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.LISTDBINFO</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.LISTDBNAMES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.LISTDBSCHEMA</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.LISTTABLES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.RANDOMSAMPLEONTABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SAMPLEONTABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SMARTSAMPLEONTABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SUBMITQUERY</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.XYEXTRACTOR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.XYEXTRACTOR_TABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ZEXTRACTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ZEXTRACTION_TABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIMEEXTRACTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIMEEXTRACTION_TABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_GEO_CHART</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_SERIES_ANALYSIS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_SERIES_CHARTS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.KNITR_COMPILER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SGVM_INTERPOLATION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.MAX_ENT_NICHE_MODELLING</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SEADATANET_INTERPOLATOR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.RASTER_DATA_PUBLISHER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GEO_CHART</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ESRI_GRID_EXTRACTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OCCURRENCE_ENRICHMENT</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.POLYGONS_TO_MAP</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SPECIES_MAP_FROM_CSQUARES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SPECIES_MAP_FROM_POINTS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GENERIC_CHARTS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.POINTS_TO_MAP</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CSQUARES_TO_COORDINATES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CSQUARE_COLUMN_CREATOR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FAO_OCEAN_AREA_COLUMN_CREATOR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FAO_OCEAN_AREA_COLUMN_CREATOR_FROM_QUADRANT</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GRID_CWP_TO_COORDINATES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.EGIP_ENERGY_AGGREGATED_DISTRIBUTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.EGIP_ENERGY_COUNTRY_DISTRIBUTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.EGIP_ENERGY_TRENDS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.EGIP_ENERGY_YEAR_DISTRIBUTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ESTIMATE_FISHING_ACTIVITY</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ESTIMATE_MONTHLY_FISHING_EFFORT</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.WEB_APP_PUBLISHER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.HRS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.QUALITY_ANALYSIS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.DISCREPANCY_ANALYSIS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.MAPS_COMPARISON</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.DBSCAN</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.LOF</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.KMEANS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.XMEANS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.modellers.AQUAMAPSNN</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.BIONYM</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.LWR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.CMSY</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.modellers.HSPEN</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.modellers.FEED_FORWARD_ANN</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.BIONYM_BIODIV</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.AQUAMAPS_NATIVE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.AQUAMAPS_NATIVE_2050</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.AQUAMAPS_SUITABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.AQUAMAPS_SUITABLE_2050</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.FAOMSY</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.FEED_FORWARD_A_N_N_DISTRIBUTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SHAPEFILE_PUBLISHER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GENERIC_WORKER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ABSENCE_GENERATION_FROM_OBIS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ECOPATH_WITH_ECOSIM</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ENSEMBLE_MODEL</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ICHTHYOP_MODEL_ONE_BY_ONE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.STEP_3___VPA_ICCAT_BFT_E_PROJECTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CCAMLR_EXPORTER_TOOL</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.STEP_1___VPA_ICCAT_BFT_E_RETROS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.STEP_2__VPA_ICCAT_BFT_E_VISUALISATION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ICHTHYOP_MODEL_MULTIPLE_RUNS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.PARALLELIZED_STEP1_VPA_ICCAT_BFT_E_RETROS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.STEP_4_VPA_ICCAT_BFT_E_REPORT</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TUNA_ATLAS_DATA_ACCESS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FIGIS_SPATIAL_REALLOCATION_SIMPLIFIED</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FIGIS_SPATIAL_REALLOCATION_GENERIC</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FIGIS_SPATIAL_REALLOCATION_SIMPLIFIED_TABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FIGIS_SDMX_DATA_CONVERTER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_BY_SPECIES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.MAKE_ICHTHYOP_NETCDF_CF_COMPLIANT</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ICHTHYOP_NETCDF_OUTPUT_TO_SHAPEFILE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GENETICALGORITHM</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GLOBAL_CATCHES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.MPA_INTERSECT_V2</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.QUICK_RANK_TRAIN</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.QUICK_RANK_TRAIN_NO_VALIDATION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.QUICK_RANK_TEST</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.STAT_VAL</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TRAJECTORY_BUILDER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TUNA_ATLAS_INDICATOR_1__SPECIES_BY_OCEAN_</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SIMULFISHKPIS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.READWFS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.PROJECTIONS_REPORT_VPA_ICCAT_BFT_E</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.WHOLE_STEPS_VPA_ICCAT_BFT_E</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_BY_FLAGS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_BY_TYPE_OF_SCHOOL</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_BY_GEAR_SIMPLIFIED_VERSION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_BY_FLAGS_SIMPLIFIED_VERSION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_BY_SPECIES_SIMPLIFIED_VERSION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_BY_GEARS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.COMPUTE_FISHERIES_INDICATORS_FROM_OWN_FORMATTED_DATASET</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.IMPORT_FISHERIES_FORMATTED_DATASET___QUICK_IMPORT</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_AGGREGATED_FOLLOWING_A_SELECT_VARIABLE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.WTG</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FEED_FORWARD_NEURAL_NETWORK_TRAINER</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SHARK_ABUNDANCY</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.NCOUTPUTS2CSV_VPA_ICCAT_BFT_E</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FEED_FORWARD_NEURAL_NETWORK_REGRESSOR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CMSY_2</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.FEED_FORWARD_NEURAL_NETWORK_CLOUD_REGRESSOR</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CATCHES_INDICATORS_IOTC_REST_SERVICES</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.STAT_VAL_UNIPARTITE_NET</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GEOGRSF_INTERSECTS</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TESTING_ALGORITHM</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SPATIAL_DISTRIBUTION_OF_CORRELATION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.KAPPA_COEFFICIENT</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TRANSFORM_IOTC_CATCH_AND_EFFORT_DSD_CECOASTAL_AND_CESURFACE</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.LOAD_CATCHES_DATASET_IN_SARDARA</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.LOAD_EFFORTS_DATASET_IN_SARDARA</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SCATTERPLOT_DIAGRAM</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SPATIAL_DENSITY_DISTRIBUTION</Property>
|
||||
<Property name="Algorithm" active="true">org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GEOGRSF_BBOXLIST</Property>
|
||||
</Repository>
|
||||
<Repository name="UploadedAlgorithmRepository" className="org.n52.wps.server.UploadedAlgorithmRepository" active="false"/>
|
||||
<Repository name="ServiceLoaderAlgorithmRepository" className="org.n52.wps.server.ServiceLoaderAlgorithmRepository" active="true"/>
|
||||
</AlgorithmRepositoryList>
|
||||
<RemoteRepositoryList/>
|
||||
<Server protocol="http" hostname="localhost" hostport="8080" includeDataInputsInResponse="false" computationTimeoutMilliSeconds="259200000" cacheCapabilites="false" webappPath="wps" repoReloadInterval="0.0" minPoolSize="10" maxPoolSize="20" keepAliveSeconds="1000" maxQueuedTasks="100">
|
||||
<Database/>
|
||||
</Server>
|
||||
</WPSConfiguration>
File diff suppressed because it is too large
@ -0,0 +1,87 @@
body {
  font-family: "Trebuchet MS", Helvetica, sans-serif;
}

p.infotext {
  color: #afafaf;
  font-size: 10pt;
}

/* Left will inherit from right (so we don't need to duplicate code) */
.github-fork-ribbon {
  /* The right and left classes determine the side we attach our banner to */
  position: absolute;
  /* Add a bit of padding to give some substance outside the "stitching" */
  padding: 2px 0;
  /* Set the base colour */
  background-color: #66C5E4;
  /* Set a gradient: transparent black at the top to almost-transparent black at the bottom */
  background-image: -webkit-gradient(linear, left top, left bottom, from(rgba(0, 0, 0, 0)), to(rgba(0, 0, 0, 0.15)));
  background-image: -webkit-linear-gradient(top, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
  background-image: -moz-linear-gradient(top, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
  background-image: -ms-linear-gradient(top, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
  background-image: -o-linear-gradient(top, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
  background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.15));
  /* Add a drop shadow */
  -webkit-box-shadow: 0 2px 3px 0 rgba(0, 0, 0, 0.5);
  -moz-box-shadow: 0 2px 3px 0 rgba(0, 0, 0, 0.5);
  box-shadow: 0 2px 3px 0 rgba(0, 0, 0, 0.5);
  z-index: 9999;
  pointer-events: auto;
}

.github-fork-ribbon a,
.github-fork-ribbon a:hover {
  /* Set the font */
  font: 700 13px "Helvetica Neue", Helvetica, Arial, sans-serif;
  color: #fff;
  /* Set the text properties */
  text-decoration: none;
  text-shadow: 0 -1px rgba(0, 0, 0, 0.5);
  text-align: center;
  /* Set the geometry. If you fiddle with these you'll also need
     to tweak the top and right values in .github-fork-ribbon. */
  width: 200px;
  line-height: 20px;
  /* Set the layout properties */
  display: inline-block;
  padding: 2px 0;
  /* Add "stitching" effect */
  border-width: 1px 0;
  border-style: dotted;
  border-color: #fff;
  border-color: rgba(255, 255, 255, 0.7);
}

.github-fork-ribbon-wrapper {
  width: 150px;
  height: 150px;
  position: absolute;
  overflow: hidden;
  top: 0;
  z-index: 9999;
  pointer-events: none;
}

.github-fork-ribbon-wrapper.fixed {
  position: fixed;
}

.github-fork-ribbon-wrapper.right {
  right: 0;
}

.github-fork-ribbon-wrapper.right .github-fork-ribbon {
  top: 42px;
  right: -43px;
  -webkit-transform: rotate(45deg);
  -moz-transform: rotate(45deg);
  -ms-transform: rotate(45deg);
  -o-transform: rotate(45deg);
  transform: rotate(45deg);
}
@ -0,0 +1,24 @@
#### Two appenders: a rolling file for the analysis log, plus an async appender for Hibernate
log4j.rootCategory=ERROR,AR

#### Console appender (disabled; uncomment to also log to stdout)
#log4j.appender.stdout=org.apache.log4j.ConsoleAppender
#log4j.appender.stdout.Threshold=OFF
#log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
#log4j.appender.stdout.layout.ConversionPattern=%d{dd/MM/yyyy HH:mm:ss} %p %t %c - %m%n

#### Rolling file appender for the AnalysisLogger
log4j.logger.AnalysisLogger=AR
log4j.appender.AR=org.apache.log4j.RollingFileAppender
log4j.appender.AR.Threshold=TRACE
log4j.appender.AR.File=logs/analysis/Analysis.log
log4j.appender.AR.MaxFileSize=50000KB
log4j.appender.AR.MaxBackupIndex=2
log4j.appender.AR.layout=org.apache.log4j.PatternLayout
log4j.appender.AR.layout.ConversionPattern=%d{dd/MM/yyyy HH:mm:ss} %p %t %c - %m%n

#### Async appender for Hibernate (threshold OFF, i.e. silenced)
log4j.logger.org.hibernate=H
log4j.appender.H=org.apache.log4j.AsyncAppender
log4j.appender.H.Threshold=OFF
log4j.appender.H.layout=org.apache.log4j.PatternLayout
log4j.appender.H.layout.ConversionPattern=%d{dd/MM/yyyy HH:mm:ss} %p %t %c - %m%n
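
# Editorial note (assumption about intent): in log4j 1.x the usual form of a logger line is
#   log4j.logger.<name>=<LEVEL>[,<appenderName>]
# so "log4j.logger.AnalysisLogger=AR" appears to put the appender name where a level is
# expected; "log4j.logger.AnalysisLogger=TRACE,AR" would be the conventional spelling.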
@ -0,0 +1,696 @@
##--------------------------------------------------------
## CMSY analysis with estimation of total biomass, including Bayesian Schaefer
## written by Rainer Froese with support from Gianpaolo Coro in 2013-2014
## This version adjusts biomass to average biomass over the year
## It also contains the FutureCrash option to improve prediction of final biomass
## Version 21 adds the purple point to indicate the 25th percentile of final biomass
## Version 22 accepts that no biomass or CPUE data are available
##--------------------------------------------------------
library(R2jags)  # interface with JAGS
library(coda)

#-----------------------------------------
# Some general settings
#-----------------------------------------
# set.seed(999) # use for comparing results between runs
rm(list=ls(all=TRUE)) # clear previous variables etc.
options(digits=3)     # display numbers with three significant digits by default
graphics.off()        # close graphics windows from previous sessions

#-----------------------------------------
# General settings for the analysis
#-----------------------------------------
sigR <- 0.02 # overall process error; 0.05 works reasonably for simulations, 0.02 for real data; 0 for a deterministic model
n    <- 10000 # initial number of r-k pairs
batch.mode   <- T # set to TRUE to suppress graphs
write.output <- T # set to TRUE if a table of output is wanted
FutureCrash  <- "No"

#-----------------------------------------
# Start output to screen
#-----------------------------------------
cat("-------------------------------------------\n")
cat("Catch-MSY Analysis,", date(), "\n")
cat("-------------------------------------------\n")

#------------------------------------------
# Read data and assign to vectors
#------------------------------------------
# filename_1 <- "AllStocks_Catch4.csv"
# filename_2 <- "AllStocks_ID4.csv"
# filename_1 <- "SimCatch.csv"
# filename_2 <- "SimSpec.csv"
# filename_2 <- "SimSpecWrongS.csv"
# filename_2 <- "SimSpecWrongI.csv"
# filename_2 <- "SimSpecWrongF.csv"
# filename_2 <- "SimSpecWrongH.csv"
# filename_2 <- "SimSpecWrongL.csv"
# filename_1 <- "FishDataLim.csv"
# filename_2 <- "FishDataLimSpec.csv"
filename_1 <- "WKLIFE4Stocks.csv"
filename_2 <- "WKLIFE4ID.csv"

outfile     <- "outfile"
outfile.txt <- "outputfile.txt"

cdat  <- read.csv(filename_1, header=T, dec=".", stringsAsFactors=FALSE)
cinfo <- read.csv(filename_2, header=T, dec=".", stringsAsFactors=FALSE)
cat("Files", filename_1, ",", filename_2, "read successfully", "\n")

# Stocks with total biomass data and catch data from StartYear to EndYear
# stocks <- sort(as.character(cinfo$stock)) # all stocks
stocks <- "HLH_M07"

# select one stock after the other
for(stock in stocks) {
  # assign data from cinfo to vectors
  res         <- as.character(cinfo$Resilience[cinfo$stock==stock])
  StartYear   <- as.numeric(cinfo$StartYear[cinfo$stock==stock])
  EndYear     <- as.numeric(cinfo$EndYear[cinfo$stock==stock])
  r_low       <- as.numeric(cinfo$r_low[cinfo$stock==stock])
  r_hi        <- as.numeric(cinfo$r_hi[cinfo$stock==stock])
  stb_low     <- as.numeric(cinfo$stb_low[cinfo$stock==stock])
  stb_hi      <- as.numeric(cinfo$stb_hi[cinfo$stock==stock])
  intyr       <- as.numeric(cinfo$intyr[cinfo$stock==stock])
  intbio_low  <- as.numeric(cinfo$intbio_low[cinfo$stock==stock])
  intbio_hi   <- as.numeric(cinfo$intbio_hi[cinfo$stock==stock])
  endbio_low  <- as.numeric(cinfo$endbio_low[cinfo$stock==stock])
  endbio_hi   <- as.numeric(cinfo$endbio_hi[cinfo$stock==stock])
  Btype       <- as.character(cinfo$Btype[cinfo$stock==stock])
  FutureCrash <- as.character(cinfo$FutureCrash[cinfo$stock==stock])
  comment     <- as.character(cinfo$comment[cinfo$stock==stock])

  # extract data on the stock
  yr <- as.numeric(cdat$yr[cdat$stock==stock & cdat$yr >= StartYear & cdat$yr <= EndYear])
  ct <- as.numeric(cdat$ct[cdat$stock==stock & cdat$yr >= StartYear & cdat$yr <= EndYear])/1000 ## assumes catch is given in tonnes, converts to '000 tonnes
  if(Btype=="observed" | Btype=="CPUE" | Btype=="simulated") {
    bt <- as.numeric(cdat$TB[cdat$stock==stock & cdat$yr >= StartYear & cdat$yr <= EndYear])/1000 ## assumes biomass is in tonnes, converts to '000 tonnes
  } else {bt <- NA}
  nyr <- length(yr) # number of years in the time series

  if(Btype!="observed") {bio <- bt}
  # change biomass to a moving average, as assumed by Schaefer (but not for simulations or CPUE);
  # for the last year use the reported biomass
  if(Btype=="observed") {
    ma  <- function(x){filter(x,rep(1/2,2),sides=2)}
    bio <- ma(bt)
    bio[length(bio)] <- bt[length(bt)] }
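  # Editorial note: stats::filter(x, rep(1/2,2), sides=2) is a centred two-point
  # moving average, so each bio[t] becomes (bt[t]+bt[t+1])/2; the last year has no
  # successor, which is why it is overwritten with the reported value above.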

  # initialize vectors for viable r, k, bt
  rv.all  <- vector()
  kv.all  <- vector()
  btv.all <- matrix(data=vector(), ncol=nyr+1)

  #----------------------------------------------------
  # Determine initial ranges for parameters and biomass
  #----------------------------------------------------
  # initial range of r from the input file
  if(is.na(r_low)==F & is.na(r_hi)==F) {
    start_r <- c(r_low,r_hi)
  } else {
    # initial range of r based on resilience
    if(res == "High") {
      start_r <- c(0.6,1.5)} else if(res == "Medium") {
      start_r <- c(0.2,0.8)} else if(res == "Low") {
      start_r <- c(0.05,0.5)} else { # i.e. res == "Very low"
      start_r <- c(0.015,0.1)}
  }

  # initial range of k values, assuming k will always be larger than max catch
  # and max catch will never be smaller than a quarter of MSY
  start_k <- c(max(ct), 16*max(ct)/start_r[1])
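  # Editorial note: the upper bound follows from the two assumptions above.
  # For a Schaefer model MSY = r*k/4; if max(ct) >= MSY/4 = r*k/16, then
  # k <= 16*max(ct)/r, evaluated at the smallest plausible r (start_r[1]).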

  # initial biomass range from the input file
  if(is.na(stb_low)==F & is.na(stb_hi)==F) {
    startbio <- c(stb_low,stb_hi)
  } else {
    # use low biomass at start as the default
    startbio <- c(0.1,0.5)
  }

  MinYear <- yr[which.min(ct)]
  MaxYear <- yr[which.max(ct)]
  # use the year and biomass range for intermediate biomass from the input file
  if(is.na(intbio_low)==F & is.na(intbio_hi)==F) {
    intyr  <- intyr # intyr already read from the input file
    intbio <- c(intbio_low,intbio_hi)
  # else, if the year of minimum catch is at least 3 years away from the StartYear and EndYear of the series, use min catch
  } else if((MinYear - StartYear) > 3 & (EndYear - MinYear) > 3 ) {
    # assume that the biomass range in the year before the minimum catch was 0.01 - 0.4
    intyr  <- MinYear-1
    intbio <- c(0.01,0.4)
  # else, if the year of maximum catch is at least 3 years away from the StartYear and EndYear of the series, use max catch
  } else if((MaxYear - StartYear) > 3 & (EndYear - MaxYear) > 3 ) {
    # assume that the biomass range in the year before the maximum catch was 0.3 - 0.9
    intyr  <- MaxYear-1
    intbio <- c(0.3,0.9)
  } else {
    # assume an uninformative range 0-1 in the mid-year
    intyr  <- as.integer(mean(c(StartYear, EndYear)))
    intbio <- c(0,1) }
  # end of intbio setting

  # final biomass range from the input file
  if(is.na(endbio_low)==F & is.na(endbio_hi)==F) {
    endbio <- c(endbio_low,endbio_hi)
  } else {
    # else use catch/max catch to estimate the final biomass
    endbio <- if(ct[nyr]/max(ct) > 0.5) {c(0.4,0.8)} else {c(0.01,0.4)}
  } # end of final biomass setting

  #----------------------------------------------
  # MC with Schaefer function filtering
  #----------------------------------------------
  Schaefer <- function(ri, ki, startbio, intyr, intbio, endbio, sigR, pt) {

    # if the stock is not expected to crash within 3 years should the last catch
    # continue, extend the time series with 3 years of the last catch
    if(FutureCrash == "No") {
      yr.s  <- c(yr,EndYear+1,EndYear+2,EndYear+3)
      ct.s  <- c(ct,ct[yr==EndYear],ct[yr==EndYear],ct[yr==EndYear])
      nyr.s <- length(yr.s)
    } else {
      yr.s  <- yr
      ct.s  <- ct
      nyr.s <- nyr
    }

    # create vector of initial biomasses
    startbt <- seq(from=startbio[1], to=startbio[2], by=(startbio[2]-startbio[1])/10)
    # create vectors for viable r, k and bt
    rv  <- array(-1:-1, dim=c(length(ri)*length(startbt))) # initialize with -1; entries still -1 after the search are removed
    kv  <- array(-1:-1, dim=c(length(ri)*length(startbt)))
    btv <- matrix(data=NA, nrow=(length(ri)*length(startbt)), ncol=nyr+1)
    intyr.i <- which(yr.s==intyr) # get the index of the intermediate year

    # loop through r-k pairs
    npoints <- length(ri)
    nstartb <- length(startbt)

    for(i in 1 : npoints) {
      if (i%%1000==0)
        cat(".")

      # create an empty vector for annual biomasses
      bt <- vector()

      # loop through the range of relative start biomasses
      for(jj in 1:nstartb) {
        j <- startbt[jj] # relative start biomass (jj is its integer index into the storage vectors)
        # set initial biomass, including process error
        bt[1]=j*ki[i]*exp(rnorm(1,0, sigR)) ## set biomass in the first year

        # loop through the years in the catch time series
        for(t in 1:nyr.s) { # for all years in the time series
          xt=rnorm(1,0, sigR) # set a new random process error for every year

          # calculate biomass as the previous year's biomass plus surplus production minus catch
          bt[t+1]=(bt[t]+ri[i]*bt[t]*(1-bt[t]/ki[i])-ct.s[t])*exp(xt)
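          # Editorial note: this is the discrete Schaefer surplus-production model with
          # multiplicative lognormal process error:
          #   B[t+1] = ( B[t] + r*B[t]*(1 - B[t]/k) - C[t] ) * exp(eps),  eps ~ N(0, sigR)
          # Surplus production peaks at B = k/2, where it equals MSY = r*k/4.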

          # if biomass < 0.01 k or > 1.1 k, discard the r-k pair
          if(bt[t+1] < 0.01*ki[i] || bt[t+1] > 1.1*ki[i]) { break } # stop looping through years, go to the next upper level

          if ((t+1)==intyr.i && (bt[t+1]>(intbio[2]*ki[i]) || bt[t+1]<(intbio[1]*ki[i]))) { break } # intermediate-year check

        } # end of loop over years

        # if the last biomass falls outside the expected range, discard this trajectory
        if(t < nyr.s || bt[yr.s==EndYear] > (endbio[2]*ki[i]) || bt[yr.s==EndYear] < (endbio[1]*ki[i])) {
          next } else {
          # store r, k, and bt, plot the point, then go to the next startbt
          rv[((i-1)*nstartb)+jj]   <- ri[i]
          kv[((i-1)*nstartb)+jj]   <- ki[i]
          btv[((i-1)*nstartb)+jj,] <- bt[1:(nyr+1)]/ki[i] # store a row of relative biomass, excluding the FutureCrash years
          if(pt==T) {points(x=ri[i], y=ki[i], pch=".", cex=2, col="black")
          next }
        }
      } # end of loop over initial biomasses
    } # end of loop over r-k pairs

    rv  <- rv[rv!=-1]
    kv  <- kv[kv!=-1]
    btv <- na.omit(btv) # drop rows that were never filled

    cat("\n")
    return(list(rv, kv, btv))
  } # end of Schaefer function

  #------------------------------------------------------------------
  # Uniform sampling of the r-k space
  #------------------------------------------------------------------
  # get a random set of r and k from a log-uniform distribution
  ri1 <- exp(runif(n, log(start_r[1]), log(start_r[2])))
  ki1 <- exp(runif(n, log(start_k[1]), log(start_k[2])))
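  # Editorial note: sampling r and k uniformly on the log scale gives equal weight
  # to every order of magnitude; since the prior k interval can span two or more
  # orders of magnitude, uniform sampling on the natural scale would almost never
  # propose small k values.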

  #-----------------------------------------------------------------
  # Plot data and progress
  #-----------------------------------------------------------------
  # windows(14,9)
  par(mfcol=c(2,3))
  # plot catch
  plot(x=yr, y=ct, ylim=c(0,1.2*max(ct)), type="l", bty="l", main=paste(stock,"catch"), xlab="Year",
       ylab="Catch", lwd=2)
  points(x=yr[which.max(ct)], y=max(ct), col="red", lwd=2)
  points(x=yr[which.min(ct)], y=min(ct), col="red", lwd=2)

  # plot the r-k graph
  plot(ri1, ki1, xlim=start_r, ylim=start_k, log="xy", xlab="r", ylab="k", main="Finding viable r-k", pch=".", cex=2, bty="l", col="lightgrey")

  # 1 - call the MC-Schaefer function to preliminarily explore the space without prior information
  cat(stock, ": First Monte Carlo filtering of r-k space with ", n, " points\n")
  MCA <- Schaefer(ri=ri1, ki=ki1, startbio=startbio, intyr=intyr, intbio=intbio, endbio=endbio, sigR=sigR, pt=T)
  rv.all  <- append(rv.all, MCA[[1]])
  kv.all  <- append(kv.all, MCA[[2]])
  btv.all <- rbind(btv.all, MCA[[3]])
  # take the viable r and k values
  nviablepoints <- length(rv.all)
  cat("* Found ", nviablepoints, " viable points from ", n, " samples\n")

  # if few points were found, resample and shrink the k log space
  if (nviablepoints<=1000){
    log.start_k.new  <- log(start_k)
    max_attempts     <- 3
    current_attempts <- 1
    while (nviablepoints<=1000 && current_attempts<=max_attempts){
      if(nviablepoints > 0) {
        log.start_k.new[1] <- mean(c(log.start_k.new[1], min(log(kv.all))))
        log.start_k.new[2] <- mean(c(log.start_k.new[2], max(log(kv.all)))) }
      n.new <- n*current_attempts # add more points
      ri1 <- exp(runif(n.new, log(start_r[1]), log(start_r[2])))
      ki1 <- exp(runif(n.new, log.start_k.new[1], log.start_k.new[2]))
      cat("Shrinking k space: repeating Monte Carlo in the interval [",exp(log.start_k.new[1]),",",exp(log.start_k.new[2]),"]\n")
      cat("Attempt ",current_attempts," of ",max_attempts," with ",n.new," points","\n")
      MCA <- Schaefer(ri=ri1, ki=ki1, startbio=startbio, intyr=intyr, intbio=intbio, endbio=endbio, sigR=sigR, pt=T)
      rv.all  <- append(rv.all, MCA[[1]])
      kv.all  <- append(kv.all, MCA[[2]])
      btv.all <- rbind(btv.all, MCA[[3]])
      nviablepoints <- length(rv.all) # recalculate viable points
      cat("* Found altogether", nviablepoints, " viable points \n")
      current_attempts <- current_attempts+1 # increment the number of attempts
    }
  }

  # if the tip of the viable r-k pairs is 'thin', do extra sampling there
  gm.rv <- exp(mean(log(rv.all)))
  if(length(rv.all[rv.all > 0.9*start_r[2]]) < 10) {
    l.sample.r <- (gm.rv + max(rv.all))/2
    cat("Final sampling in the tip area above r =", l.sample.r, "\n")
    log.start_k.new <- c(log(0.8*min(kv.all)), log(max(kv.all[rv.all > l.sample.r])))
    ri1 <- exp(runif(50000, log(l.sample.r), log(start_r[2])))
    ki1 <- exp(runif(50000, log.start_k.new[1], log.start_k.new[2]))
    MCA <- Schaefer(ri=ri1, ki=ki1, startbio=startbio, intyr=intyr, intbio=intbio, endbio=endbio, sigR=sigR, pt=T)
    rv.all  <- append(rv.all, MCA[[1]])
    kv.all  <- append(kv.all, MCA[[2]])
    btv.all <- rbind(btv.all, MCA[[3]])
    nviablepoints <- length(rv.all) # recalculate viable points
    cat("Found altogether", length(rv.all), "unique viable r-k pairs and biomass trajectories\n")
  }

  # ------------------------------------------------------------
  # Bayesian analysis of catch & biomass with the Schaefer model
  # ------------------------------------------------------------
  if(Btype == "observed" | Btype=="simulated") {
    cat("Running Schaefer MCMC analysis....\n")
    mcmc.burn        <- as.integer(30000)
    mcmc.chainLength <- as.integer(60000) # burn-in plus post-burn
    mcmc.thin   <- 10 # to reduce autocorrelation
    mcmc.chains <- 3  # needs to be at least 2 for DIC

    # parameters to be returned by JAGS
    jags.save.params <- c('r','k','sigma.b', 'alpha', 'sigma.r')

    # JAGS model
    Model <- "model{
      # to avoid a crash due to 0 values
      eps <- 0.01
      # set a quite narrow variation around the expected value
      sigma.b <- 1/16
      tau.b   <- pow(sigma.b,-2)

      Bm[1]  <- log(alpha*k)
      bio[1] ~ dlnorm(Bm[1],tau.b)

      for (t in 2:nyr){
        bio[t] ~ dlnorm(Bm[t],tau.b)
        Bm[t] <- log(max(bio[t-1] + r*bio[t-1]*(1 - (bio[t-1])/k) - ct[t-1], eps))
      }

      # priors
      alpha ~ dunif(0.01,1) # needed to fit the first biomass
      # inverse cube-root relationship between the range of viable r and the size of the search space
      inverseRangeFactor <- 1/((start_r[2]-start_r[1])^(1/3))

      # give sigma some variability in the inverse relationship
      sigma.r ~ dunif(0.001*inverseRangeFactor, 0.02*inverseRangeFactor)
      tau.r   <- pow(sigma.r,-2)
      rm      <- log((start_r[1]+start_r[2])/2)
      r       ~ dlnorm(rm,tau.r)

      # search the k space from the centre of the range; allow high variability
      km    <- log((start_k[1]+start_k[2])/2)
      tau.k <- pow(km,-2)
      k     ~ dlnorm(km,tau.k)

    # end model
    }"
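    # Editorial note: JAGS parameterizes dlnorm (and dnorm) by precision rather than
    # standard deviation, hence tau = pow(sigma,-2) = 1/sigma^2 throughout the model;
    # dlnorm(mu, tau) places a normal with mean mu and precision tau on log(x).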

    # write the JAGS model to file
    cat(Model, file="r2jags.bug")

    ### random seed
    set.seed(runif(1,1,500)) # needed in JAGS

    ### run the model
    jags_outputs <- jags(data=c('ct','bio','nyr', 'start_r', 'start_k'),
                         working.directory=NULL, inits=NULL,
                         parameters.to.save=jags.save.params,
                         model.file="r2jags.bug", n.chains=mcmc.chains,
                         n.burnin=mcmc.burn, n.thin=mcmc.thin, n.iter=mcmc.chainLength,
                         refresh=mcmc.burn/20)

    # ------------------------------------------------------
    # Results from the JAGS Schaefer model
    # ------------------------------------------------------
    r_out     <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$r))
    k_out     <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$k))
    ## sigma_out <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$sigma.b))
    alpha_out <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$alpha))
    ## sigma.r_out <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$sigma.r))

    mean.log.r.jags <- mean(log(r_out))
    SD.log.r.jags   <- sd(log(r_out))
    lcl.log.r.jags  <- mean.log.r.jags-1.96*SD.log.r.jags
    ucl.log.r.jags  <- mean.log.r.jags+1.96*SD.log.r.jags
    gm.r.jags       <- exp(mean.log.r.jags)
    lcl.r.jags      <- exp(lcl.log.r.jags)
    ucl.r.jags      <- exp(ucl.log.r.jags)
    mean.log.k.jags <- mean(log(k_out))
    SD.log.k.jags   <- sd(log(k_out))
    lcl.log.k.jags  <- mean.log.k.jags-1.96*SD.log.k.jags
    ucl.log.k.jags  <- mean.log.k.jags+1.96*SD.log.k.jags
    gm.k.jags       <- exp(mean.log.k.jags)
    lcl.k.jags      <- exp(lcl.log.k.jags)
    ucl.k.jags      <- exp(ucl.log.k.jags)
    mean.log.MSY.jags <- mean(log(r_out)+log(k_out)-log(4))
    SD.log.MSY.jags   <- sd(log(r_out)+log(k_out)-log(4))
    gm.MSY.jags       <- exp(mean.log.MSY.jags)
    lcl.MSY.jags      <- exp(mean.log.MSY.jags-1.96*SD.log.MSY.jags)
    ucl.MSY.jags      <- exp(mean.log.MSY.jags+1.96*SD.log.MSY.jags)

  } # end of MCMC Schaefer loop

  #------------------------------------
  # Get results from CMSY
  #------------------------------------
  # get the estimate of the most probable r as the median of the mid log.r classes above the cut-off
  # get the remaining viable log.r and log.k
  rem.log.r <- log(rv.all[rv.all > gm.rv])
  rem.log.k <- log(kv.all[rv.all > gm.rv])
  # get vectors with the counts of r and the mid values in about 25 classes
  hist.log.r   <- hist(x=rem.log.r, breaks=25, plot=F)
  log.r.counts <- hist.log.r$counts
  log.r.mids   <- hist.log.r$mids
  # get the most probable log.r as the median of the mids with counts > 0
  log.r.est <- median(log.r.mids[which(log.r.counts > 0)])
  lcl.log.r <- as.numeric(quantile(x=log.r.mids[which(log.r.counts > 0)], 0.025))
  ucl.log.r <- as.numeric(quantile(x=log.r.mids[which(log.r.counts > 0)], 0.975))
  r.est     <- exp(log.r.est)
  lcl.r.est <- exp(lcl.log.r)
  ucl.r.est <- exp(ucl.log.r)

  # linear regression of log k ~ log r with the slope fixed to -1 (from Schaefer)
  reg <- lm(rem.log.k ~ 1 + offset(-1*rem.log.r))
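  # Editorial note: offset() supplies a term with a fixed, known coefficient, so this
  # model estimates only the intercept of log k = c - log r. The fixed slope of -1
  # mirrors the Schaefer relation MSY = r*k/4: along the ridge of equally viable
  # pairs r*k is roughly constant, i.e. log k is linear in log r with slope -1.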
  int.reg <- as.numeric(reg[1])
  sd.reg  <- sd(resid(reg))
  se.reg  <- summary(reg)$coefficients[2]
  # get the estimate of log(k) from y where x = log.r.est
  log.k.est <- int.reg + (-1) * log.r.est
  # get the confidence limits of log.k.est from y +/- SD where x = lcl.log.r or ucl.log.r
  lcl.log.k <- int.reg + (-1) * ucl.log.r - sd.reg
  ucl.log.k <- int.reg + (-1) * lcl.log.r + sd.reg
  k.est     <- exp(log.k.est)
  lcl.k.est <- exp(lcl.log.k)
  ucl.k.est <- exp(ucl.log.k)

  # get MSY from the remaining log r-k pairs
  log.MSY.est     <- mean(rem.log.r + rem.log.k - log(4))
  sd.log.MSY.est  <- sd(rem.log.r + rem.log.k - log(4))
  lcl.log.MSY.est <- log.MSY.est - 1.96*sd.log.MSY.est
  ucl.log.MSY.est <- log.MSY.est + 1.96*sd.log.MSY.est
  MSY.est     <- exp(log.MSY.est)
  lcl.MSY.est <- exp(lcl.log.MSY.est)
  ucl.MSY.est <- exp(ucl.log.MSY.est)
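  # Editorial note: since MSY = r*k/4 under Schaefer, log MSY = log r + log k - log 4;
  # the 95% interval is taken as exp(mean +/- 1.96*SD) of that quantity over the
  # remaining viable pairs, i.e. a lognormal approximation.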

  # get predicted biomass vectors as the median and quantiles of the trajectories
  median.btv <- apply(btv.all, 2, median)
  lastyr.bio <- median.btv[length(median.btv)-1]
  nextyr.bio <- median.btv[length(median.btv)]
  lcl.btv <- apply(btv.all, 2, quantile, probs=0.025)
  q.btv   <- apply(btv.all, 2, quantile, probs=0.25)
  ucl.btv <- apply(btv.all, 2, quantile, probs=0.975)
  lcl.lastyr.bio <- lcl.btv[length(lcl.btv)-1]
  ucl.lastyr.bio <- ucl.btv[length(lcl.btv)-1]
  lcl.nextyr.bio <- lcl.btv[length(lcl.btv)]
  ucl.nextyr.bio <- ucl.btv[length(lcl.btv)]

  # -----------------------------------------
  # Plot results
  # -----------------------------------------
  # analysis of viable r-k pairs
  plot(x=rv.all, y=kv.all, xlim=start_r,
       ylim=c(0.9*min(kv.all, ifelse(Btype == "observed",k_out,NA), na.rm=T), 1.1*max(kv.all)),
       pch=16, col="grey", log="xy", bty="l",
       xlab="r", ylab="k", main="Analysis of viable r-k")
  abline(v=gm.rv, lty="dashed")

  # plot the points and the best estimate from the full Schaefer analysis
  if(Btype == "observed" | Btype=="simulated") {
    # plot r-k pairs from the MCMC
    points(x=r_out, y=k_out, pch=16, cex=0.5)
    # plot the best r-k pair from the MCMC
    points(x=gm.r.jags, y=gm.k.jags, pch=19, col="green")
    lines(x=c(lcl.r.jags, ucl.r.jags), y=c(gm.k.jags,gm.k.jags), col="green")
    lines(x=c(gm.r.jags,gm.r.jags), y=c(lcl.k.jags, ucl.k.jags), col="green")
  }

  # if the data are from a simulation, plot the true r and k
  if(Btype=="simulated") {
    l.stock <- nchar(stock) # get the length of the sim stock name
    r.char  <- substr(stock, l.stock-1, l.stock) # get the last characters of the sim stock name
    r.sim   <- NA # initialize the r used in the simulation
    if(r.char=="_H") {r.sim=1; lcl.r.sim=0.8; ucl.r.sim=1.25} else
      if(r.char=="_M") {r.sim=0.5; lcl.r.sim=0.4; ucl.r.sim=0.62} else
        if(r.char=="_L") {r.sim=0.25; lcl.r.sim=0.2; ucl.r.sim=0.31} else {r.sim=0.05; lcl.r.sim=0.04; ucl.r.sim=0.062}
    # plot the true r-k point with error bars
    points(x=r.sim, y=1000, pch=19, col="red")
    # add +/- 20% error bars
    lines(x=c(lcl.r.sim,ucl.r.sim), y=c(1000,1000), col="red")
    lines(x=c(r.sim,r.sim), y=c(800,1250), col="red")
  }

  # plot a blue dot for the proposed r-k, with 95% CL lines
  points(x=r.est, y=k.est, pch=19, col="blue")
  lines(x=c(lcl.r.est, ucl.r.est), y=c(k.est,k.est), col="blue")
  lines(x=c(r.est,r.est), y=c(lcl.k.est, ucl.k.est), col="blue")

  # plot the biomass graph
  # determine which k to use for the red line in the b/k plot
  if(Btype=="simulated") {k2use <- 1000} else
    if(Btype == "observed") {k2use <- gm.k.jags} else {k2use <- k.est}
  # determine the height of the y-axis in the plot
  max.y <- max(c(bio/k2use, ucl.btv, 0.6, startbio[2], intbio[2], endbio[2]), na.rm=T)

  plot(x=yr, y=median.btv[1:nyr], lwd=2, xlab="Year", ylab="Relative biomass b/k", type="l",
       ylim=c(0,max.y), bty="l", main=paste("Pred. biomass vs ", Btype, sep=""))
  lines(x=yr, y=lcl.btv[1:nyr], type="l")
  lines(x=yr, y=ucl.btv[1:nyr], type="l")
  points(x=EndYear, y=q.btv[yr==EndYear], col="purple", cex=1.5, lwd=2)
  abline(h=0.5, lty="dashed")
  abline(h=0.25, lty="dotted")
  lines(x=c(yr[1],yr[1]), y=startbio, col="blue")
  lines(x=c(intyr,intyr), y=intbio, col="blue")
  lines(x=c(max(yr),max(yr)), y=endbio, col="blue")

  # if observed biomass is available, plot a red biomass line
  if(Btype == "observed" | Btype=="simulated") {
    lines(x=yr, y=bio/k2use, type="l", col="red", lwd=1)
  }

  # if CPUE data are available, scale them to the predicted biomass range and plot a red line
  if(Btype == "CPUE") {
    par(new=T) # prepare a new plot on top of the previous one
    plot(x=yr, y=bio, type="l", col="red", lwd=1,
         ann=F, axes=F, ylim=c(0,1.2*max(bio, na.rm=T))) # force this plot on top of the previous one
    axis(4, col="red", col.axis="red")
  }

  # plot yield and biomass against the equilibrium surplus parabola
  max.y <- max(c(ct/MSY.est, ifelse(Btype=="observed"|Btype=="simulated", ct/gm.MSY.jags, NA), 1.2), na.rm=T)
  # plot the parabola
  x <- seq(from=0, to=2, by=0.001)
  y <- 4*x-(2*x)^2
  plot(x=x, y=y, xlim=c(0,1), ylim=c(0,max.y), type="l", bty="l", xlab="Relative biomass b/k",
       ylab="Catch / MSY", main="Equilibrium curve")
  # plot catch against the CMSY biomass estimates
  points(x=median.btv[1:nyr], y=ct/MSY.est, pch=16, col="grey")
  points(x=q.btv[yr==EndYear], y=ct[yr==EndYear]/MSY.est, col="purple", cex=1.5, lwd=2)
  # plot catch against observed biomass or CPUE
  if(Btype == "observed" | Btype=="simulated") {
    points(x=bio/k2use, y=ct/gm.MSY.jags, pch=16, cex=0.5)
  }

  # plot exploitation rate u against u.msy
  # get u derived from the predicted CMSY biomass
  u.CMSY     <- ct/(median.btv[1:nyr]*k.est)
  u.msy.CMSY <- 1-exp(-r.est/2) # Fmsy from CMSY expressed as an exploitation rate
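  # Editorial note: with Fmsy = r/2 as an instantaneous rate (Schaefer), the
  # corresponding annual exploitation fraction is u.msy = 1 - exp(-Fmsy), the
  # standard conversion from an instantaneous mortality rate to a proportion
  # removed per year.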
  # get u from observed or simulated biomass
  if(Btype == "observed" | Btype=="simulated") {
    u.bio     <- ct/bio
    u.msy.bio <- 1-exp(-gm.r.jags/2)
  }
  # get u from CPUE
  if(Btype == "CPUE") {
    q <- max(median.btv[1:nyr][is.na(bio)==F], na.rm=T)*k.est/max(bio, na.rm=T)
    u.CPUE <- ct/(q*bio)
  }
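  # Editorial note: q here rescales CPUE to absolute biomass by matching the maxima
  # of the predicted biomass (median.btv*k.est) and of the CPUE series; note this is
  # the inverse of the usual catchability convention CPUE = q*B.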
|
||||
# determine upper bound of Y-axis
|
||||
max.y <- max(c(1.5, 1.2*u.CMSY/u.msy.CMSY,ct[yr==EndYear]/(q.btv[yr==EndYear]*k.est)/u.msy.CMSY,
|
||||
ifelse(Btype=="observed"|Btype=="simulated",max(u.bio[is.na(u.bio)==F]/u.msy.bio),0),
|
||||
na.rm=T))
|
||||
# plot u from CMSY
|
||||
plot(x=yr,y=u.CMSY/u.msy.CMSY, type="l", bty="l", ylim=c(0,max.y), xlab="Year",
|
||||
ylab="u / u_msy", main="Exploitation rate")
|
||||
abline(h=1, lty="dashed")
|
||||
points(x=EndYear,y=ct[yr==EndYear]/(q.btv[yr==EndYear]*k.est)/u.msy.CMSY, col="purple", cex=1.5, lwd=2)
|
||||
# plot u from biomass
|
||||
if(Btype == "observed"|Btype=="simulated") lines(x=yr, y=u.bio/u.msy.bio, col="red")
|
||||
# plot u from CPUE
|
||||
if(Btype == "CPUE") {
|
||||
par(new=T) # prepares for new plot on top of previous
|
||||
plot(x=yr, y=u.CPUE, type="l", col="red", ylim=c(0, 1.2*max(u.CPUE,na.rm=T)),ann=F,axes=F)
|
||||
axis(4, col="red", col.axis="red")
|
||||
}
|
||||
if(batch.mode == TRUE) {dev.off()} # close plot window
|
||||
|
||||
# ------------------------------------------
|
||||
# print input and results to screen
|
||||
cat("---------------------------------------\n")
|
||||
|
||||
cat("Species:", cinfo$ScientificName[cinfo$stock==stock], "\n")
|
||||
cat("Name and region:", cinfo$EnglishName[cinfo$stock==stock], ",", cinfo$Name[cinfo$stock==stock], "\n")
|
||||
cat("Stock:",stock,"\n")
|
||||
cat("Catch data used from years", min(yr),"-", max(yr), "\n")
|
||||
cat("Prior initial relative biomass =", startbio[1], "-", startbio[2], "\n")
|
||||
cat("Prior intermediate rel. biomass=", intbio[1], "-", intbio[2], "in year", intyr, "\n")
|
||||
cat("Prior final relative biomass =", endbio[1], "-", endbio[2], "\n")
|
||||
cat("If current catches continue, is the stock likely to crash within 3 years?",FutureCrash,"\n")
|
||||
cat("Prior range for r =", format(start_r[1],digits=2), "-", format(start_r[2],digits=2),
|
||||
", prior range for k =", start_k[1], "-", start_k[2],"\n")
|
||||
|
||||
# if data are simulated, print true r-k
|
||||
if(filename_1=="SimCatch.csv") {
|
||||
cat("True r =", r.sim, "(because input data were simulated with Schaefer model)\n")
|
||||
cat("True k = 1000 \n")
|
||||
cat("True MSY =", 1000*r.sim/4,"\n")
|
||||
cat("True biomass in last year =",bio[length(bio)],"or",bio[length(bio)]/1000,"k \n")
|
||||
cat("True mean catch / MSY ratio =", mean(ct)/(1000*r.sim/4),"\n")
|
||||
}
|
||||
# print results from full Schaefer if available
|
||||
if(Btype == "observed"|Btype=="simulated") {
|
||||
cat("Results from Bayesian Schaefer model using catch & biomass (",Btype,")\n")
|
||||
cat("MSY =", gm.MSY.jags,", 95% CL =", lcl.MSY.jags, "-", ucl.MSY.jags,"\n")
|
||||
cat("Mean catch / MSY =", mean(ct)/gm.MSY.jags,"\n")
|
||||
if(Btype != "CPUE") {
|
||||
cat("r =", gm.r.jags,", 95% CL =", lcl.r.jags, "-", ucl.r.jags,"\n")
|
||||
cat("k =", gm.k.jags,", 95% CL =", lcl.k.jags, "-", ucl.k.jags,"\n")
|
||||
}
|
||||
}
|
||||
# results of CMSY analysis
|
||||
cat("Results of CMSY analysis \n")
|
||||
cat("Altogether", nviablepoints,"unique viable r-k pairs were found \n")
|
||||
cat(nviablepoints-length(rem.log.r),"r-k pairs above the initial geometric mean of r =", gm.rv, "were analysed\n")
|
||||
cat("r =", r.est,", 95% CL =", lcl.r.est, "-", ucl.r.est,"\n")
|
||||
cat("k =", k.est,", 95% CL =", lcl.k.est, "-", ucl.k.est,"\n")
|
||||
cat("MSY =", MSY.est,", 95% CL =", lcl.MSY.est, "-", ucl.MSY.est,"\n")
|
||||
cat("Predicted biomass in last year =", lastyr.bio, "2.5th perc =", lcl.lastyr.bio,
|
||||
"97.5th perc =", ucl.lastyr.bio,"\n")
|
||||
cat("Predicted biomass in next year =", nextyr.bio, "2.5th perc =", lcl.nextyr.bio,
|
||||
"97.5th perc =", ucl.nextyr.bio,"\n")
|
||||
cat("----------------------------------------------------------\n")
|
||||
|
||||
## Write some results into outfile
|
||||
if(write.output == TRUE) {
|
||||
# write data into csv file
|
||||
output = data.frame(cinfo$ScientificName[cinfo$stock==stock], stock, StartYear, EndYear, mean(ct)*1000,
|
||||
ifelse(Btype=="observed"|Btype=="simulate",bio[length(bio)],NA), # last biomass on record
|
||||
ifelse(Btype == "observed"|Btype=="simulated",gm.MSY.jags,NA), # full Schaefer
|
||||
ifelse(Btype == "observed"|Btype=="simulated",lcl.MSY.jags,NA),
|
||||
ifelse(Btype == "observed"|Btype=="simulated",ucl.MSY.jags,NA),
|
||||
ifelse(Btype == "observed"|Btype=="simulated",gm.r.jags,NA),
|
||||
ifelse(Btype == "observed"|Btype=="simulated",lcl.r.jags,NA),
|
||||
ifelse(Btype == "observed"|Btype=="simulated",ucl.r.jags,NA),
|
||||
ifelse(Btype == "observed"|Btype=="simulated",gm.k.jags,NA),
|
||||
ifelse(Btype == "observed"|Btype=="simulated",lcl.k.jags,NA),
|
||||
ifelse(Btype == "observed"|Btype=="simulated",ucl.k.jags,NA),
|
||||
r.est, lcl.r.est, ucl.r.est, # CMSY r
|
||||
k.est, lcl.k.est, ucl.k.est, # CMSY k
|
||||
MSY.est, lcl.MSY.est, ucl.MSY.est, # CMSY MSY
|
||||
lastyr.bio, lcl.lastyr.bio, ucl.lastyr.bio, # last year bio
|
||||
nextyr.bio, lcl.nextyr.bio, ucl.nextyr.bio)# last year + 1 bio
|
||||
|
||||
write.table(output, file=outfile, append = T, sep = ",",
|
||||
dec = ".", row.names = FALSE, col.names = FALSE)
|
||||
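# Note: append = T with col.names = FALSE adds one row per stock and never
# writes a header, so the column order must be known to downstream readers.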
|
||||
# write some text into text outfile.txt
|
||||
|
||||
cat("Species:", cinfo$ScientificName[cinfo$stock==stock], "\n",
|
||||
"Name:", cinfo$EnglishName[cinfo$stock==stock], "\n",
|
||||
"Region:", cinfo$Name[cinfo$stock==stock], "\n",
|
||||
"Stock:",stock,"\n",
|
||||
"Catch data used from years", min(yr),"-", max(yr),", biomass =", Btype, "\n",
|
||||
"Prior initial relative biomass =", startbio[1], "-", startbio[2], "\n",
|
||||
"Prior intermediate rel. biomass=", intbio[1], "-", intbio[2], "in year", intyr, "\n",
|
||||
"Prior final relative biomass =", endbio[1], "-", endbio[2], "\n",
|
||||
"Future crash with current catches?", FutureCrash, "\n",
|
||||
"Prior range for r =", format(start_r[1],digits=2), "-", format(start_r[2],digits=2),
|
||||
", prior range for k =", start_k[1], "-", start_k[2],"\n",
|
||||
file=outfile.txt,append=T)
|
||||
|
||||
if(filename_1=="SimCatch.csv") {
|
||||
cat(" True r =", r.sim, "(because input data were simulated with Schaefer model)\n",
|
||||
"True k = 1000, true MSY =", 1000*r.sim/4,"\n",
|
||||
"True biomass in last year =",bio[length(bio)],"or",bio[length(bio)]/1000,"k \n",
|
||||
"True mean catch / MSY ratio =", mean(ct)/(1000*r.sim/4),"\n",
|
||||
file=outfile.txt,append=T)
|
||||
}
|
||||
if(Btype == "observed"|Btype=="simulated") {
|
||||
cat(" Results from Bayesian Schaefer model using catch & biomass \n",
|
||||
"r =", gm.r.jags,", 95% CL =", lcl.r.jags, "-", ucl.r.jags,"\n",
|
||||
"k =", gm.k.jags,", 95% CL =", lcl.k.jags, "-", ucl.k.jags,"\n",
|
||||
"MSY =", gm.MSY.jags,", 95% CL =", lcl.MSY.jags, "-", ucl.MSY.jags,"\n",
|
||||
"Mean catch / MSY =", mean(ct)/gm.MSY.jags,"\n",
|
||||
file=outfile.txt,append=T)
|
||||
}
|
||||
cat(" Results of CMSY analysis with altogether", nviablepoints,"unique viable r-k pairs \n",
|
||||
nviablepoints-length(rem.log.r),"r-k pairs above the initial geometric mean of r =", gm.rv, "were analysed\n",
|
||||
"r =", r.est,", 95% CL =", lcl.r.est, "-", ucl.r.est,"\n",
|
||||
"k =", k.est,", 95% CL =", lcl.k.est, "-", ucl.k.est,"\n",
|
||||
"MSY =", MSY.est,", 95% CL =", lcl.MSY.est, "-", ucl.MSY.est,"\n",
|
||||
"Predicted biomass last year b/k =", lastyr.bio, "2.5th perc b/k =", lcl.lastyr.bio,
|
||||
"97.5th perc b/k =", ucl.lastyr.bio,"\n",
|
||||
"Precautionary 25th percentile b/k =",q.btv[yr==EndYear],"\n",
|
||||
"----------------------------------------------------------\n",
|
||||
file=outfile.txt,append=T)
|
||||
|
||||
}
|
||||
|
||||
} # end of stocks loop
|
|
@ -0,0 +1,435 @@
|
|||
set.seed(999) ## for same random sequence
|
||||
#require(hacks)
|
||||
#13/05/2015
|
||||
#setwd("C:/Users/Ye/Documents/Data poor fisheries/Martell Froese Method/")
|
||||
|
||||
## Read data for stock, year=yr, catch=ct, and resilience=res. Expects a space-delimited file with header "yr ct", years as integers and catch as reals with a decimal point
|
||||
## For example
|
||||
## stock res yr ct
|
||||
## cap-icel Medium 1984 1234.32
|
||||
|
||||
## filename <- "RAM_MSY.csv"
|
||||
##filename <- "ICESct2.csv"
|
||||
|
||||
cat("Step 1","\n")
|
||||
TestRUN <- F # TRUE runs only the test samples; FALSE runs the full formal analysis
|
||||
|
||||
filename <- "D20.csv"
|
||||
outfile <- "CatchMSY_Output.csv"
|
||||
outfile2 <- "NonProcessedSpecies.csv"
|
||||
|
||||
#cdat <- read.csv2(filename, header=T, dec=".")
|
||||
cdat1 <- read.csv(filename)
|
||||
cat("\n", "File", filename, "read successfully","\n")
|
||||
|
||||
|
||||
cat("Step 2","\n")
|
||||
if(file.exists("cdat.RData"))
|
||||
{load("cdat.RData")} else
|
||||
{
|
||||
|
||||
dim(cdat1)
|
||||
yrs=1950:2013
|
||||
|
||||
# to set NA as 0
|
||||
cdat1[is.na(cdat1)] <- 0
|
||||
nrow <- length(cdat1[,1])
|
||||
ndatColn <- length(cdat1[1,c(-1:-12)])
|
||||
rownames(cdat1) <- NULL
|
||||
|
||||
cdat <- NULL
|
||||
|
||||
for(i in 1:nrow)
|
||||
#for(i in 1:5)
|
||||
|
||||
{#i=1
|
||||
#a <- ctotal3[i,-1]
|
||||
tmp=data.frame(stock=rep(as.character(cdat1[i,"Stock_ID"]),ndatColn),
|
||||
species=rep(as.character(cdat1[i,"Scientific_name"]),ndatColn),
|
||||
yr=yrs,ct=unlist(c(cdat1[i,-c(1:12)])),
|
||||
res=rep(cdat1[i,"ResilienceIndex"],ndatColn))
|
||||
|
||||
cdat <- rbind(cdat,tmp)
|
||||
#edit(cdat)
|
||||
}
|
||||
save(cdat,file="cdat.RData")
|
||||
}
|
||||
|
||||
StockList=unique(as.character(cdat$stock))
|
||||
|
||||
cat("Step 3","\n")
|
||||
## FUNCTIONS are going to be used subsequently
|
||||
.schaefer <- function(theta)
|
||||
{
|
||||
with(as.list(theta), { ## for all combinations of ri & ki
|
||||
bt=vector()
|
||||
ell = 0 ## initialize ell
|
||||
J=0 #Ye
|
||||
for (j in startbt)
|
||||
{
|
||||
if(ell == 0)
|
||||
{
|
||||
bt[1]=j*k*exp(rnorm(1,0, sigR)) ## set biomass in first year
|
||||
for(i in 1:nyr) ## for all years in the time series
|
||||
{
|
||||
xt=rnorm(1,0, sigR)
|
||||
bt[i+1]=(bt[i]+r*bt[i]*(1-bt[i]/k)-ct[i])*exp(xt)
|
||||
## calculate biomass as function of previous year's biomass plus net production minus catch
|
||||
}
|
||||
|
||||
#Bernoulli likelihood, assign 0 or 1 to each combination of r and k
|
||||
ell = 0
|
||||
if(bt[nyr+1]/k>=lam1 && bt[nyr+1]/k <=lam2 && min(bt) > 0 && max(bt) <=k && bt[which(yr==interyr)]/k>=interbio[1] && bt[which(yr==interyr)]/k<=interbio[2])
|
||||
ell = 1
|
||||
J=j # Ye
|
||||
}
|
||||
}
|
||||
return(list(ell=ell,J=J)) # Ye adding J=J
|
||||
|
||||
|
||||
})
|
||||
}
|
||||
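## Illustration only (hypothetical values, not part of the analysis): one
## deterministic step of the Schaefer surplus-production update used above,
## b[t+1] = b + r*b*(1 - b/k) - catch:
## b <- 500; r <- 0.5; k <- 1000; catch <- 80
## b + r*b*(1 - b/k) - catch   # 500 + 125 - 80 = 545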
|
||||
sraMSY <-function(theta, N)
|
||||
{
|
||||
#This function conducts the stock reduction
|
||||
#analysis for N trials
|
||||
#args:
|
||||
# theta - a list object containing:
|
||||
# r (lower and upper bounds for r)
|
||||
# k (lower and upper bounds for k)
|
||||
# lambda (limits for current depletion)
|
||||
|
||||
|
||||
with(as.list(theta),
|
||||
{
|
||||
ri = exp(runif(N, log(r[1]), log(r[2]))) ## get N values between r[1] and r[2], assign to ri
|
||||
ki = exp(runif(N, log(k[1]), log(k[2]))) ## get N values between k[1] and k[2], assign to ki
|
||||
itheta=cbind(r=ri,k=ki, lam1=lambda[1],lam2=lambda[2], sigR=sigR)
|
||||
## assign ri, ki, and final biomass range to itheta
|
||||
M = apply(itheta,1,.schaefer) ## call Schaefer function with parameters in itheta
|
||||
i=1:N
|
||||
## prototype objective function
|
||||
get.ell=function(i) M[[i]]$ell
|
||||
ell = sapply(i, get.ell)
|
||||
get.J=function(i) M[[i]]$J # Ye
|
||||
J=sapply(i,get.J) # Ye
|
||||
return(list(r=ri,k=ki, ell=ell, J=J)) # Ye adding J=J
|
||||
})
|
||||
}
|
||||
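## Sketch of a stand-alone call with hypothetical bounds (assumes ct, yr, nyr,
## startbt, interyr and interbio already exist in the workspace):
## theta <- list(r=c(0.05,0.5), k=c(100,5000), lambda=c(0.05,0.6), sigR=0)
## R0 <- sraMSY(theta, N=1000) # R0$ell flags viable r-k pairs, R0$J the accepted start biomass fraction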
|
||||
|
||||
getBiomass <- function(r, k, j)
|
||||
{
|
||||
BT <- NULL
|
||||
bt=vector()
|
||||
for (v in 1:length(r))
|
||||
{
|
||||
bt[1]=j[v]*k[v]*exp(rnorm(1,0, sigR)) ## set biomass in first year
|
||||
for(i in 1:nyr) ## for all years in the time series
|
||||
{
|
||||
xt=rnorm(1,0, sigR)
|
||||
bt[i+1]=(bt[i]+r[v]*bt[i]*(1-bt[i]/k[v])-ct[i])*exp(xt)
|
||||
## calculate biomass as function of previous year's biomass plus net production minus catch
|
||||
}
|
||||
BT=rbind(BT, t(t(bt)))
|
||||
}
|
||||
return(BT)
|
||||
}
|
||||
|
||||
## The End of Functions section
|
||||
|
||||
|
||||
cat("Step 4","\n")
|
||||
stockLoop <- StockList
|
||||
# randomly select stocks from randomly selected 5 area codes first
|
||||
if(TestRUN)
|
||||
{
|
||||
set.seed(999)
|
||||
AreaCodeList <- unique(cdat1$AREA_Code)
|
||||
sampledAC <- sample(AreaCodeList,size=5,replace=F)
|
||||
stockLoop <- cdat1[cdat1$AREA_Code %in% sampledAC,c("Stock_ID")]
|
||||
}
|
||||
|
||||
#setup counters
|
||||
counter1 <- 0
|
||||
counter2 <- 0
|
||||
|
||||
cat("Step 4","\n")
|
||||
## Loop through stocks
|
||||
for(stock in stockLoop)
|
||||
{
|
||||
t0<-Sys.time()
|
||||
##stock = "3845" # NB only for test single loop!
|
||||
## make graph file names:
|
||||
b <- with(cdat1,cdat1[Stock_ID == stock,c(1,3,5,12)]) # Stock_ID,AREA_Names,Country,"Species"
|
||||
bb <- do.call(paste,b)
|
||||
|
||||
yr <- cdat$yr[as.character(cdat$stock)==stock]
|
||||
ct <- as.numeric(cdat$ct[as.character(cdat$stock)==stock])/1000 ## assumes that catch is given in tonnes, transforms to '000 tonnes
|
||||
res <- unique(as.character(cdat$res[as.character(cdat$stock)==stock])) ## resilience from FishBase, if needed, enable in PARAMETER SECTION
|
||||
nyr <- length(yr) ## number of years in the time series
|
||||
|
||||
cat("\n","Stock",stock,"\n")
|
||||
flush.console()
|
||||
|
||||
## PARAMETER SECTION
|
||||
mvlen=3
|
||||
ma=function(x,n=mvlen){filter(x,rep(1/n,n),sides=1)}
|
||||
|
||||
## If resilience is to be used, keep the four-row if-else block below active; otherwise comment it out and enable the fixed start_r line further down
|
||||
start_r <- if(res == "Very low"){c(0.015, 0.1)}else{
|
||||
if(res == "Low") {c(0.05,0.5)}else {
|
||||
if(res == "High") {c(0.6,1.5)}else {c(0.2,1)}
|
||||
}
|
||||
}
|
||||
## Medium, or default if no res is found
|
||||
##start_r <- c(0.5,1.5) ## disable this line if you use resilience
|
||||
start_k <- c(max(ct),50*max(ct)) ## default upper bound for k: 50 * max catch
|
||||
## startbio <- c(0.8,1) ## assumed biomass range at start of time series, as fraction of k
|
||||
##startbio <- if(ct[1]/max(ct) < 0.5) {c(0.5,0.9)} else {c(0.3,0.6)} ## use for batch processing
|
||||
|
||||
## NB: Yimin's new idea on 20Jan14
|
||||
startbio<- if(mean(ct[1:5])/max(ct) < 0.3) {c(0.6,0.95)} else {
|
||||
if(mean(ct[1:5])/max(ct)>0.3&mean(ct[1:5])/max(ct)<0.6) {c(0.3,0.7)} else {
|
||||
c(0.2,0.6)}}
|
||||
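## Worked example of the rule above: mean(ct[1:5])/max(ct) = 0.25 gives a
## start biomass prior of 0.6-0.95 of k; a ratio of 0.45 gives 0.3-0.7;
## any other ratio gives 0.2-0.6.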
|
||||
interyr <- yr[2] ## interim year within time series for which biomass estimate is available; set to yr[2] if no estimates are available
|
||||
interbio <- c(0, 1) ## biomass range for interim year, as fraction of k; set to 0 and 1 if not available
|
||||
## finalbio <- c(0.8, 0.9) ## biomass range after last catches, as fraction of k
|
||||
## finalbio <- if(ct[nyr]/max(ct) > 0.5) {c(0.3,0.7)} else {c(0.01,0.4)} ## use for batch processing
|
||||
|
||||
## Yimin's new stuff on 10Mar14
|
||||
#######> pre-classification
|
||||
|
||||
pre.clas=ct
|
||||
pre.clas[pre.clas==0]=0.1
|
||||
tx=ma(as.numeric(pre.clas),n=mvlen)
|
||||
Myr=which.max(tx)
|
||||
Maxc=pre.clas[which.max(tx)]
|
||||
|
||||
|
||||
if(Myr==1)startbio=c(0.05,0.6)else
|
||||
{
|
||||
if (ct[1]/Maxc>=0.5) startbio=c(0.4,0.85)
|
||||
else startbio=c(0.65,0.95)
|
||||
}
|
||||
|
||||
if (Myr==length(yr))finalbio=c(.4,.95) else # i.e. from fully to over-exploited
|
||||
{
|
||||
if (tx[length(ct)]/Maxc>=0.5) finalbio=c(.4,.85)
|
||||
else finalbio=c(.05,.6)
|
||||
}
|
||||
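## Pre-classification example (hypothetical catches): ma(c(10,20,30,40)) with
## n=3 yields NA NA 20 30, so which.max(tx) picks the final year and the
## final biomass prior becomes c(0.4,0.95) of k.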
|
||||
|
||||
# if (Myr==length(yr))finalbio=c(.5,.9)
|
||||
# #if (Myr<length(yr)){
|
||||
# # if ((tx[length(ct)]/Maxc)>=0.8) finalbio=c(.4,.8) else
|
||||
# # if (tx[length(ct)]/Maxc>0.5) finalbio=c(.3,.7) else finalbio=c(.05,.6)}
|
||||
# # below is the last used (20 Feb)
|
||||
# if (Myr<length(yr))
|
||||
# {
|
||||
# if (tx[length(ct)]/Maxc>0.5) finalbio=c(.2,.8)
|
||||
# else finalbio=c(.05,.6)
|
||||
# }
|
||||
|
||||
##############<
|
||||
n <- 30000 ## number of iterations, e.g. 100000
|
||||
sigR <- 0.0 ## process error; 0 if deterministic model; 0.05 reasonable value? 0.2 is too high
|
||||
|
||||
startbt <- seq(startbio[1], startbio[2], by = 0.05) ## apply range of start biomass in steps of 0.05
|
||||
parbound <- list(r = start_r, k = start_k, lambda = finalbio, sigR)
|
||||
|
||||
cat("Last year =",max(yr),", last catch =",1000*ct[nyr],"\n")
|
||||
cat("Resilience =",res,"\n")
|
||||
cat("Process error =", sigR,"\n")
|
||||
cat("Assumed initial biomass (B/k) =", startbio[1],"-", startbio[2], " k","\n")
|
||||
cat("Assumed intermediate biomass (B/k) in", interyr, " =", interbio[1],"-",interbio[2]," k","\n")
|
||||
cat("Assumed final biomass (B/k) =", parbound$lambda[1],"-",parbound$lambda[2]," k","\n")
|
||||
cat("Initial bounds for r =", parbound$r[1], "-", parbound$r[2],"\n")
|
||||
cat("Initial bounds for k =", format(1000*parbound$k[1], digits=3), "-", format(1000*parbound$k[2],digits=3),"\n")
|
||||
|
||||
flush.console()
|
||||
|
||||
## MAIN
|
||||
|
||||
R1 = sraMSY(parbound, n)
|
||||
|
||||
## Get statistics on r, k, MSY and determine new bounds for r and k
|
||||
r1 <- R1$r[R1$ell==1]
|
||||
k1 <- R1$k[R1$ell==1]
|
||||
j1 <- R1$J[R1$ell==1] # Ye
|
||||
msy1 <- r1*k1/4
|
||||
mean_msy1 <- exp(mean(log(msy1)))
|
||||
max_k1a <- min(k1[r1<1.1*parbound$r[1]]) ## smallest k1 near initial lower bound of r
|
||||
max_k1b <- max(k1[r1*k1/4<mean_msy1]) ## largest k1 that gives mean MSY
|
||||
max_k1 <- if(max_k1a < max_k1b) {max_k1a} else {max_k1b}
|
||||
|
||||
if(length(r1)<10)
|
||||
{
|
||||
cat("Too few (", length(r1), ") possible r-k combinations,
|
||||
check input parameters","\n")
|
||||
appendPar <- ifelse(counter1==0,F,T)
|
||||
colnamePar <- ifelse(counter1==0,T,F)
|
||||
|
||||
NoModellingSpe <- as.data.frame(cbind(stock,length(r1),b))
|
||||
names(NoModellingSpe) <- c("Stock","No_of_r1",names(b))
|
||||
write.table(NoModellingSpe,file=outfile2,
|
||||
append = appendPar, row.names = FALSE,
|
||||
col.names=colnamePar,sep=",")
|
||||
flush.console()
|
||||
counter1 <- counter1 + 1
|
||||
}
|
||||
|
||||
if(length(r1)>=10)
|
||||
{
|
||||
## set new upper bound of r to 1.2 max r1
|
||||
parbound$r[2] <- 1.2*max(r1)
|
||||
## set new lower bound for k to 0.9 min k1 and upper bound to max_k1
|
||||
parbound$k <- c(0.9 * min(k1), max_k1)
|
||||
|
||||
cat("First MSY =", format(1000*mean_msy1, digits=3),"\n")
|
||||
cat("First r =", format(exp(mean(log(r1))), digits=3),"\n")
|
||||
cat("New upper bound for r =", format(parbound$r[2],digits=2),"\n")
|
||||
cat("New range for k =", format(1000*parbound$k[1], digits=3), "-", format(1000*parbound$k[2],digits=3),"\n")
|
||||
|
||||
## Repeat analysis with new r-k bounds
|
||||
R1 = sraMSY(parbound, n)
|
||||
|
||||
## Get statistics on r, k and msy
|
||||
r = R1$r[R1$ell==1]
|
||||
k = R1$k[R1$ell==1]
|
||||
j = R1$J[R1$ell==1] # Ye
|
||||
msy = r * k / 4
|
||||
mean_ln_msy = mean(log(msy))
|
||||
|
||||
##############################################################
|
||||
##> Ye
|
||||
# BT=0
|
||||
|
||||
##
|
||||
R2<-getBiomass(r, k, j)
|
||||
|
||||
#R2<-R2[-1,]
|
||||
runs<-rep(1:length(r), each=nyr+1)
|
||||
years=rep(yr[1]:(yr[length(yr)]+1),length=length(r)*(length(yr)+1))
|
||||
|
||||
runs=t(runs)
|
||||
years=t(years)
|
||||
stock_id=rep(stock,length(runs))
|
||||
R3<-cbind(as.numeric(runs), as.numeric(years), stock_id, as.numeric(R2) )
|
||||
|
||||
## keep columns as character; otherwise Biomass would be stored as a factor and its levels, not its values, would be used below
|
||||
R4<-data.frame(R3, stringsAsFactors=FALSE)
|
||||
names(R4)<-c("Run", "Year", "Stock","Biomass")
|
||||
|
||||
Bmsy_x<-k*0.5
|
||||
Run<-c(1:length(r))
|
||||
BMSY<-cbind(Run, Bmsy_x)
|
||||
R5<-merge(R4, BMSY, by="Run", all.x=T, all.y=F)
|
||||
R5$B_Bmsy<-as.numeric(paste(R5$Biomass))/R5$Bmsy_x
|
||||
|
||||
### B/Bmsy calculated for all feasible combinations of r,K,B0
|
||||
R6<-aggregate(log(B_Bmsy)~as.numeric(Year)+Stock, data=R5,
|
||||
FUN=function(z){c(mean=mean(z),sd=sd(z),upr=exp(quantile(z, p=0.975)),
|
||||
lwr=exp(quantile(z, p=0.025)), lwrQ=exp(quantile(z, p=0.25)),
|
||||
uprQ=exp(quantile(z, p=0.75)))}) # computed directly from R5 because B_Bmsy follows a lognormal distribution
|
||||
|
||||
R6<-data.frame(cbind(R6[,1:2],R6[,3][,1],R6[,3][,2],R6[,3][,3],R6[,3][,4],R6[,3][,5], R6[,3][,6]))
|
||||
names(R6)<-c("Year", "Stock", "BoverBmsy", "BoverBmsySD","BoverBmsyUpper","BoverBmsyLower","BoverBmsylwrQ","BoverBmsyuprQ")
|
||||
##remove last entry as it is 1 greater than number of years
|
||||
## removed final year here for ease of dataframe output below
|
||||
R6<-R6[-nrow(R6),] # drop the last row; length() would count columns, not rows
|
||||
## geometric mean
|
||||
GM_B_Bmsy<-exp(R6$BoverBmsy)
|
||||
GM_B_BmsySD=R6$BoverBmsySD #add
|
||||
## arithmetic mean
|
||||
M_B_Bmsy<-exp(R6$BoverBmsy+R6$BoverBmsySD^2/2)
|
||||
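## Lognormal mean correction illustrated: if mean(log(B/Bmsy)) = 0 with SD 0.4,
## the geometric mean is exp(0) = 1 while the arithmetic mean is
## exp(0 + 0.4^2/2) = exp(0.08), roughly 1.083.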
|
||||
### r,k, and MSY
|
||||
|
||||
#del GM_B_Bmsy=c(rep(0,(min(yr)-1940)),GM_B_Bmsy)
|
||||
#del GM_B_BmsySD=c(rep(0,(min(yr)-1940)),GM_B_BmsySD) ######
|
||||
#del M_B_Bmsy=c(rep(0,(min(yr)-1940)),M_B_Bmsy)
|
||||
#del yr1=seq(1940,max(yr))
|
||||
|
||||
yr1=yr #add
|
||||
|
||||
stockInfo <- with(cdat1,cdat1[Stock_ID==stock,1:12])
|
||||
temp=c(startbio[1],startbio[2],finalbio[1],finalbio[2],res,
|
||||
mean(log(r)),sd(log(r)),mean(log(k)),sd(log(k)),mean(log(msy)),
|
||||
sd(log(msy)),sigR,min(yr),max(yr),max(ct),length(r),GM_B_Bmsy,GM_B_BmsySD,M_B_Bmsy)
|
||||
|
||||
#add, adding "GM_B_BmsySD" in the line above
|
||||
|
||||
output=as.data.frame(matrix(temp,nrow=1))
|
||||
output <- cbind(stockInfo,output)
|
||||
names(output) <- c(names(cdat1)[1:12],"startbio[1]","startbio[2]","finalbio[1]","finalbio[2]",
|
||||
"res","mean(log(r))","sd(log(r))","mean(log(k))","sd(log(k))",
|
||||
"mean(log(msy))","sd(log(msy))","sigR","min(yr)","max(yr)","max(ct)",
|
||||
"length(r)",paste("GM_B_msy",yr1,sep="_"),paste("GM_B_msySD",yr1,sep="_"),paste("M_B_Bmsy",yr1,sep="_"))
|
||||
|
||||
#add, adding "paste("GM_B_msySD",yr1,sep="_")"in the line above
|
||||
|
||||
######< Ye
|
||||
########################################################
|
||||
|
||||
## plot MSY over catch data
|
||||
pdf(paste(bb,"graph.pdf",sep="_"))
|
||||
|
||||
par(mfcol=c(2,3))
|
||||
plot(yr, ct, type="l", ylim = c(0, max(ct)), xlab = "Year",
|
||||
ylab = "Catch (1000 t)",main = paste("StockID",stock,sep=":"))
|
||||
abline(h=exp(mean(log(msy))),col="red", lwd=2)
|
||||
abline(h=exp(mean_ln_msy - 2 * sd(log(msy))),col="red")
|
||||
abline(h=exp(mean_ln_msy + 2 * sd(log(msy))),col="red")
|
||||
|
||||
hist(r, freq=F, xlim=c(0, 1.2 * max(r)), main = "")
|
||||
abline(v=exp(mean(log(r))),col="red",lwd=2)
|
||||
abline(v=exp(mean(log(r))-2*sd(log(r))),col="red")
|
||||
abline(v=exp(mean(log(r))+2*sd(log(r))),col="red")
|
||||
|
||||
plot(r1, k1, xlim = start_r, ylim = start_k, xlab="r", ylab="k (1000t)")
|
||||
|
||||
hist(k, freq=F, xlim=c(0, 1.2 * max(k)), xlab="k (1000t)", main = "")
|
||||
abline(v=exp(mean(log(k))),col="red", lwd=2)
|
||||
abline(v=exp(mean(log(k))-2*sd(log(k))),col="red")
|
||||
abline(v=exp(mean(log(k))+2*sd(log(k))),col="red")
|
||||
|
||||
|
||||
plot(log(r), log(k),xlab="ln(r)",ylab="ln(k)")
|
||||
abline(v=mean(log(r)))
|
||||
abline(h=mean(log(k)))
|
||||
abline(mean(log(msy))+log(4),-1, col="red",lwd=2)
|
||||
abline(mean(log(msy))-2*sd(log(msy))+log(4),-1, col="red")
|
||||
abline(mean(log(msy))+2*sd(log(msy))+log(4),-1, col="red")
|
||||
|
||||
hist(msy, freq=F, xlim=c(0, 1.2 * max(msy)), xlab="MSY (1000t)",main = "")
|
||||
abline(v=exp(mean(log(msy))),col="red", lwd=2)
|
||||
abline(v=exp(mean_ln_msy - 2 * sd(log(msy))),col="red")
|
||||
abline(v=exp(mean_ln_msy + 2 * sd(log(msy))),col="red")
|
||||
|
||||
graphics.off()
|
||||
|
||||
cat("Possible combinations = ", length(r),"\n")
|
||||
cat("geom. mean r =", format(exp(mean(log(r))),digits=3), "\n")
|
||||
cat("r +/- 2 SD =", format(exp(mean(log(r))-2*sd(log(r))),digits=3),"-",format(exp(mean(log(r))+2*sd(log(r))),digits=3), "\n")
|
||||
cat("geom. mean k =", format(1000*exp(mean(log(k))),digits=3), "\n")
|
||||
cat("k +/- 2 SD =", format(1000*exp(mean(log(k))-2*sd(log(k))),digits=3),"-",format(1000*exp(mean(log(k))+2*sd(log(k))),digits=3), "\n")
|
||||
cat("geom. mean MSY =", format(1000*exp(mean(log(msy))),digits=3),"\n")
|
||||
cat("MSY +/- 2 SD =", format(1000*exp(mean_ln_msy - 2 * sd(log(msy))),digits=3), "-", format(1000*exp(mean_ln_msy + 2 * sd(log(msy))),digits=3), "\n")
|
||||
|
||||
## Write results into outfile, in append mode (no header in file, existing files will be continued)
|
||||
## output = data.frame(stock, sigR, startbio[1], startbio[2], interbio[1], interbio[2], finalbio[1], finalbio[2], min(yr), max(yr), res, max(ct), ct[1], ct[nyr], length(r), exp(mean(log(r))), sd(log(r)), min(r), quantile(r,0.05), quantile(r,0.25), median(r), quantile(r,0.75), quantile(r,0.95), max(r), exp(mean(log(k))), sd(log(k)), min(k), quantile(k, 0.05), quantile(k, 0.25), median(k), quantile(k, 0.75), quantile(k, 0.95), max(k), exp(mean(log(msy))), sd(log(msy)), min(msy), quantile(msy, 0.05), quantile(msy, 0.25), median(msy), quantile(msy, 0.75), quantile(msy, 0.95), max(msy))
|
||||
|
||||
#write.table(output, file = outfile, append = TRUE, sep = ";", dec = ".", row.names = FALSE, col.names = FALSE)
|
||||
appendPar <- ifelse(counter2==0,F,T)
|
||||
colnamePar <- ifelse(counter2==0,T,F)
|
||||
write.table(output, file = outfile, append = appendPar, sep = ",", dec = ".",
|
||||
row.names = FALSE, col.names = colnamePar)
|
||||
|
||||
counter2 <- counter2 + 1
|
||||
|
||||
}
|
||||
cat("Elapsed: ",Sys.time()-t0," \n")
|
||||
} ## End of stock loop, get next stock or exit
|
|
@ -0,0 +1,440 @@
|
|||
set.seed(999) ## for same random sequence
|
||||
#require(hacks)
|
||||
|
||||
#setwd("C:/Users/Ye/Documents/Data poor fisheries/Martell Froese Method/")
|
||||
|
||||
## Read data for stock, year=yr, catch=ct, and resilience=res. Expects a space-delimited file with header "yr ct", years as integers and catch as reals with a decimal point
|
||||
## For example
|
||||
## stock res yr ct
|
||||
## cap-icel Medium 1984 1234.32
|
||||
|
||||
## filename <- "RAM_MSY.csv"
|
||||
##filename <- "ICESct2.csv"
|
||||
|
||||
cat("Step 1","\n")
|
||||
TestRUN <- F # TRUE runs only the test samples; FALSE runs the full formal analysis
|
||||
|
||||
filename <- "D20.csv"
|
||||
outfile <- "CatchMSY_Output.csv"
|
||||
outfile2 <- "NonProcessedSpecies.csv"
|
||||
|
||||
#cdat <- read.csv2(filename, header=T, dec=".")
|
||||
cdat1 <- read.csv(filename)
|
||||
cat("\n", "File", filename, "read successfully","\n")
|
||||
|
||||
cat("Step 2","\n")
|
||||
if(file.exists("cdat.RData"))
|
||||
{load("cdat.RData")} else
|
||||
{
|
||||
|
||||
dim(cdat1)
|
||||
yrs=1950:2012
|
||||
|
||||
# to set NA as 0
|
||||
cdat1[is.na(cdat1)] <- 0
|
||||
nrow <- length(cdat1[,1])
|
||||
ndatColn <- length(cdat1[1,c(-1:-12)])
|
||||
rownames(cdat1) <- NULL
|
||||
|
||||
cdat <- NULL
|
||||
for(i in 1:nrow)
|
||||
{#i=1
|
||||
#a <- ctotal3[i,-1]
|
||||
tmp=data.frame(stock=rep(as.character(cdat1[i,"Stock_ID"]),ndatColn),
|
||||
species=rep(as.character(cdat1[i,"Scientific_name"]),ndatColn),
|
||||
yr=yrs,ct=unlist(c(cdat1[i,c(-1:-12)])),
|
||||
res=rep(cdat1[i,"ResilienceIndex"],ndatColn))
|
||||
|
||||
cdat <- rbind(cdat,tmp)
|
||||
#edit(cdat)
|
||||
}
|
||||
}
|
||||
|
||||
StockList=unique(as.character(cdat$stock))
|
||||
|
||||
colnames(cdat)
|
||||
|
||||
|
||||
#stock_id <- unique(as.character(cdat$stock))
|
||||
#??
|
||||
# stock_id <- "cod-2224" ## for selecting individual stocks
|
||||
# stock=stock_id
|
||||
#??
|
||||
|
||||
cat("Step 3","\n")
|
||||
|
||||
## FUNCTIONS are going to be used subsequently
|
||||
.schaefer <- function(theta)
|
||||
{
|
||||
with(as.list(theta), { ## for all combinations of ri & ki
|
||||
bt=vector()
|
||||
ell = 0 ## initialize ell
|
||||
J=0 #Ye
|
||||
for (j in startbt)
|
||||
{
|
||||
if(ell == 0)
|
||||
{
|
||||
bt[1]=j*k*exp(rnorm(1,0, sigR)) ## set biomass in first year
|
||||
for(i in 1:nyr) ## for all years in the time series
|
||||
{
|
||||
xt=rnorm(1,0, sigR)
|
||||
bt[i+1]=(bt[i]+r*bt[i]*(1-bt[i]/k)-ct[i])*exp(xt)
|
||||
## calculate biomass as function of previous year's biomass plus net production minus catch
|
||||
}
|
||||
|
||||
#Bernoulli likelihood, assign 0 or 1 to each combination of r and k
|
||||
ell = 0
|
||||
if(bt[nyr+1]/k>=lam1 && bt[nyr+1]/k <=lam2 && min(bt) > 0 && max(bt) <=k && bt[which(yr==interyr)]/k>=interbio[1] && bt[which(yr==interyr)]/k<=interbio[2])
|
||||
ell = 1
|
||||
J=j # Ye
|
||||
}
|
||||
}
|
||||
return(list(ell=ell,J=J)) # Ye adding J=J
|
||||
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
sraMSY <-function(theta, N)
|
||||
{
|
||||
#This function conducts the stock reduction
|
||||
#analysis for N trials
|
||||
#args:
|
||||
# theta - a list object containing:
|
||||
# r (lower and upper bounds for r)
|
||||
# k (lower and upper bounds for k)
|
||||
# lambda (limits for current depletion)
|
||||
|
||||
|
||||
with(as.list(theta),
|
||||
{
|
||||
ri = exp(runif(N, log(r[1]), log(r[2]))) ## get N values between r[1] and r[2], assign to ri
|
||||
ki = exp(runif(N, log(k[1]), log(k[2]))) ## get N values between k[1] and k[2], assign to ki
|
||||
itheta=cbind(r=ri,k=ki, lam1=lambda[1],lam2=lambda[2], sigR=sigR)
|
||||
## assign ri, ki, and final biomass range to itheta
|
||||
M = apply(itheta,1,.schaefer) ## call Schaefer function with parameters in itheta
|
||||
i=1:N
|
||||
## prototype objective function
|
||||
get.ell=function(i) M[[i]]$ell
|
||||
ell = sapply(i, get.ell)
|
||||
get.J=function(i) M[[i]]$J # Ye
|
||||
J=sapply(i,get.J) # Ye
|
||||
return(list(r=ri,k=ki, ell=ell, J=J)) # Ye adding J=J
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
getBiomass <- function(r, k, j)
|
||||
{
|
||||
BT <- NULL
|
||||
bt=vector()
|
||||
for (v in 1:length(r))
|
||||
{
|
||||
bt[1]=j[v]*k[v]*exp(rnorm(1,0, sigR)) ## set biomass in first year
|
||||
for(i in 1:nyr) ## for all years in the time series
|
||||
{
|
||||
xt=rnorm(1,0, sigR)
|
||||
bt[i+1]=(bt[i]+r[v]*bt[i]*(1-bt[i]/k[v])-ct[i])*exp(xt)
|
||||
## calculate biomass as function of previous year's biomass plus net production minus catch
|
||||
}
|
||||
BT=rbind(BT, t(t(bt)))
|
||||
}
|
||||
return(BT)
|
||||
}
|
||||
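## Note on the return value: rbind() of the (nyr+1) x 1 column matrices stacks
## the trajectories vertically, so BT holds length(r)*(nyr+1) rows in a single
## column; the Run and Year vectors built below rely on exactly that ordering.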
|
||||
## The End of Functions section
|
||||
|
||||
cat("Step 4","\n")
|
||||
stockLoop <- StockList
|
||||
# randomly select stocks from randomly selected 5 area codes first
|
||||
if(TestRUN)
|
||||
{
|
||||
set.seed(999)
|
||||
AreaCodeList <- unique(cdat1$AREA_Code)
|
||||
sampledAC <- sample(AreaCodeList,size=5,replace=F)
|
||||
stockLoop <- cdat1[cdat1$AREA_Code %in% sampledAC,c("Stock_ID")]
|
||||
}
|
||||
|
||||
#setup counters
|
||||
counter1 <- 0
|
||||
counter2 <- 0
|
||||
|
||||
cat("Step 4","\n")
|
||||
## Loop through stocks
|
||||
for(stock in stockLoop)
|
||||
{
|
||||
t0<-Sys.time()
|
||||
##stock = "3845" # NB only for test single loop!
|
||||
## make graph file names:
|
||||
b <- with(cdat1,cdat1[Stock_ID == stock,c(1,3,5,12)]) # Stock_ID,AREA_Names,Country,"Species"
|
||||
bb <- do.call(paste,b)
|
||||
|
||||
yr <- cdat$yr[as.character(cdat$stock)==stock]
|
||||
ct <- as.numeric(cdat$ct[as.character(cdat$stock)==stock])/1000 ## assumes that catch is given in tonnes, transforms to '000 tonnes
|
||||
res <- unique(as.character(cdat$res[as.character(cdat$stock)==stock])) ## resilience from FishBase, if needed, enable in PARAMETER SECTION
|
||||
nyr <- length(yr) ## number of years in the time series
|
||||
|
||||
cat("\n","Stock",stock,"\n")
|
||||
flush.console()
|
||||
|
||||
## PARAMETER SECTION
|
||||
mvlen=3
|
||||
ma=function(x,n=mvlen){filter(x,rep(1/n,n),sides=1)}
|
||||
|
||||
## If resilience is to be used, keep the four-row if-else block below active; otherwise comment it out and enable the fixed start_r line further down
|
||||
start_r <- if(res == "Very low"){c(0.015, 0.1)}else{
|
||||
if(res == "Low") {c(0.05,0.5)}else {
|
||||
if(res == "High") {c(0.6,1.5)}else {c(0.2,1)}
|
||||
}
|
||||
}
|
||||
## Medium, or default if no res is found
|
||||
##start_r <- c(0.5,1.5) ## disable this line if you use resilience
|
||||
start_k <- c(max(ct),50*max(ct)) ## default upper bound for k: 50 * max catch
|
||||
## startbio <- c(0.8,1) ## assumed biomass range at start of time series, as fraction of k
|
||||
##startbio <- if(ct[1]/max(ct) < 0.5) {c(0.5,0.9)} else {c(0.3,0.6)} ## use for batch processing
|
||||
|
||||
## NB: Yimin's new idea on 20Jan14
|
||||
startbio<- if(mean(ct[1:5])/max(ct) < 0.3) {c(0.6,0.95)} else {
|
||||
if(mean(ct[1:5])/max(ct)>0.3&mean(ct[1:5])/max(ct)<0.6) {c(0.3,0.7)} else {
|
||||
c(0.2,0.6)}}
|
||||
|
||||
interyr <- yr[2] ## interim year within time series for which biomass estimate is available; set to yr[2] if no estimates are available
|
||||
interbio <- c(0, 1) ## biomass range for interim year, as fraction of k; set to 0 and 1 if not available
|
||||
## finalbio <- c(0.8, 0.9) ## biomass range after last catches, as fraction of k
|
||||
## finalbio <- if(ct[nyr]/max(ct) > 0.5) {c(0.3,0.7)} else {c(0.01,0.4)} ## use for batch processing
|
||||
|
||||
## Yimin's new stuff on 10Mar14
|
||||
#######> pre-classification
|
||||
|
||||
pre.clas=ct
|
||||
pre.clas[pre.clas==0]=0.1
|
||||
tx=ma(as.numeric(pre.clas),n=mvlen)
|
||||
Myr=which.max(tx)
|
||||
Maxc=pre.clas[which.max(tx)]
|
||||
|
||||
|
||||
if(Myr==1)startbio=c(0.05,0.6)else
|
||||
{
|
||||
if (ct[1]/Maxc>=0.5) startbio=c(0.4,0.85)
|
||||
else startbio=c(0.65,0.95)
|
||||
}
|
||||
|
||||
if (Myr==length(yr))finalbio=c(.4,.95) else # i.e. from fully to over-exploited
|
||||
{
|
||||
if (tx[length(ct)]/Maxc>=0.5) finalbio=c(.4,.85)
|
||||
else finalbio=c(.05,.6)
|
||||
}
|
||||
|
||||
|
||||
# if (Myr==length(yr))finalbio=c(.5,.9)
|
||||
# #if (Myr<length(yr)){
|
||||
# # if ((tx[length(ct)]/Maxc)>=0.8) finalbio=c(.4,.8) else
|
||||
# # if (tx[length(ct)]/Maxc>0.5) finalbio=c(.3,.7) else finalbio=c(.05,.6)}
|
||||
# # below is the last used (20 Feb)
|
||||
# if (Myr<length(yr))
|
||||
# {
|
||||
# if (tx[length(ct)]/Maxc>0.5) finalbio=c(.2,.8)
|
||||
# else finalbio=c(.05,.6)
|
||||
# }
|
||||
|
||||
##############<
|
||||
n <- 30000 ## number of iterations, e.g. 100000
|
||||
sigR <- 0.0 ## process error; 0 if deterministic model; 0.05 reasonable value? 0.2 is too high
|
||||
|
||||
startbt <- seq(startbio[1], startbio[2], by = 0.05) ## apply range of start biomass in steps of 0.05
|
||||
parbound <- list(r = start_r, k = start_k, lambda = finalbio, sigR)
|
||||
|
||||
cat("Last year =",max(yr),", last catch =",1000*ct[nyr],"\n")
|
||||
cat("Resilience =",res,"\n")
|
||||
cat("Process error =", sigR,"\n")
|
||||
cat("Assumed initial biomass (B/k) =", startbio[1],"-", startbio[2], " k","\n")
|
||||
cat("Assumed intermediate biomass (B/k) in", interyr, " =", interbio[1],"-",interbio[2]," k","\n")
|
||||
cat("Assumed final biomass (B/k) =", parbound$lambda[1],"-",parbound$lambda[2]," k","\n")
|
||||
cat("Initial bounds for r =", parbound$r[1], "-", parbound$r[2],"\n")
|
||||
cat("Initial bounds for k =", format(1000*parbound$k[1], digits=3), "-", format(1000*parbound$k[2],digits=3),"\n")
|
||||
|
||||
flush.console()
|
||||
|
||||
## MAIN
|
||||
|
||||
R1 = sraMSY(parbound, n)
|
||||
|
||||
## Get statistics on r, k, MSY and determine new bounds for r and k
|
||||
r1 <- R1$r[R1$ell==1]
|
||||
k1 <- R1$k[R1$ell==1]
|
||||
j1 <- R1$J[R1$ell==1] # Ye
|
||||
msy1 <- r1*k1/4
|
||||
mean_msy1 <- exp(mean(log(msy1)))
|
||||
max_k1a <- min(k1[r1<1.1*parbound$r[1]]) ## smallest k1 near initial lower bound of r
|
||||
max_k1b <- max(k1[r1*k1/4<mean_msy1]) ## largest k1 that gives mean MSY
|
||||
max_k1 <- if(max_k1a < max_k1b) {max_k1a} else {max_k1b}
|
||||
|
||||
if(length(r1)<10)
|
||||
{
|
||||
cat("Too few (", length(r1), ") possible r-k combinations,
|
||||
check input parameters","\n")
|
||||
appendPar <- ifelse(counter1==0,F,T)
|
||||
colnamePar <- ifelse(counter1==0,T,F)
|
||||
|
||||
NoModellingSpe <- as.data.frame(cbind(stock,length(r1),b))
|
||||
names(NoModellingSpe) <- c("Stock","No_of_r1",names(b))
|
||||
write.table(NoModellingSpe,file=outfile2,
|
||||
append = appendPar, row.names = FALSE,
|
||||
col.names=colnamePar,sep=",")
|
||||
flush.console()
|
||||
counter1 <- counter1 + 1
|
||||
}
|
||||
|
||||
if(length(r1)>=10)
|
||||
{
|
||||
## set new upper bound of r to 1.2 max r1
|
||||
parbound$r[2] <- 1.2*max(r1)
|
||||
## set new lower bound for k to 0.9 min k1 and upper bound to max_k1
|
||||
parbound$k <- c(0.9 * min(k1), max_k1)
|
||||
|
||||
cat("First MSY =", format(1000*mean_msy1, digits=3),"\n")
|
||||
cat("First r =", format(exp(mean(log(r1))), digits=3),"\n")
|
||||
cat("New upper bound for r =", format(parbound$r[2],digits=2),"\n")
|
||||
cat("New range for k =", format(1000*parbound$k[1], digits=3), "-", format(1000*parbound$k[2],digits=3),"\n")
|
||||
|
||||
## Repeat analysis with new r-k bounds
|
||||
R1 = sraMSY(parbound, n)
|
||||
|
||||
## Get statistics on r, k and msy
|
||||
r = R1$r[R1$ell==1]
|
||||
k = R1$k[R1$ell==1]
|
||||
j = R1$J[R1$ell==1] # Ye
|
||||
msy = r * k / 4
|
||||
mean_ln_msy = mean(log(msy))
|
||||
|
||||
##############################################################
|
||||
##> Ye
|
||||
# BT=0
|
||||
|
||||
##
|
||||
R2<-getBiomass(r, k, j)
|
||||
|
||||
#R2<-R2[-1,]
|
||||
runs<-rep(1:length(r), each=nyr+1)
|
||||
years=rep(yr[1]:(yr[length(yr)]+1),length=length(r)*(length(yr)+1))
|
||||
|
||||
runs=t(runs)
|
||||
years=t(years)
|
||||
stock_id=rep(stock,length(runs))
|
||||
R3<-cbind(as.numeric(runs), as.numeric(years), stock_id, as.numeric(R2) )
|
||||
|
||||
## keep columns as character; otherwise Biomass would be stored as a factor and its levels, not its values, would be used below
|
||||
R4<-data.frame(R3, stringsAsFactors=FALSE)
|
||||
names(R4)<-c("Run", "Year", "Stock","Biomass")
|
||||
|
||||
Bmsy_x<-k*0.5
|
||||
Run<-c(1:length(r))
|
||||
BMSY<-cbind(Run, Bmsy_x)
|
||||
R5<-merge(R4, BMSY, by="Run", all.x=T, all.y=F)
|
||||
R5$B_Bmsy<-as.numeric(paste(R5$Biomass))/R5$Bmsy_x
|
||||
|
||||
### B/Bmsy calculated for all feasible combinations of r,K,B0
|
||||
R6<-aggregate(log(B_Bmsy)~as.numeric(Year)+Stock, data=R5,
|
||||
FUN=function(z){c(mean=mean(z),sd=sd(z),upr=exp(quantile(z, p=0.975)),
|
||||
lwr=exp(quantile(z, p=0.025)), lwrQ=exp(quantile(z, p=0.25)),
|
||||
uprQ=exp(quantile(z, p=0.75)))}) # computed directly from R5 because B_Bmsy follows a lognormal distribution
|
||||
|
||||
R6<-data.frame(cbind(R6[,1:2],R6[,3][,1],R6[,3][,2],R6[,3][,3],R6[,3][,4],R6[,3][,5], R6[,3][,6]))
|
||||
names(R6)<-c("Year", "Stock", "BoverBmsy", "BoverBmsySD","BoverBmsyUpper","BoverBmsyLower","BoverBmsylwrQ","BoverBmsyuprQ")
|
||||
##remove last entry as it is 1 greater than number of years
|
||||
## removed final year here for ease of dataframe output below
|
||||
R6<-R6[-nrow(R6),] # drop the last row; length() would count columns, not rows
|
||||
## geometric mean
|
||||
GM_B_Bmsy<-exp(R6$BoverBmsy)
|
||||
GM_B_BmsySD=R6$BoverBmsySD #add
|
||||
## arithmetic mean
|
||||
M_B_Bmsy<-exp(R6$BoverBmsy+R6$BoverBmsySD^2/2)
|
||||
|
||||
### r,k, and MSY
|
||||
|
||||
#del GM_B_Bmsy=c(rep(0,(min(yr)-1940)),GM_B_Bmsy)
|
||||
#del GM_B_BmsySD=c(rep(0,(min(yr)-1940)),GM_B_BmsySD) ######
|
||||
#del M_B_Bmsy=c(rep(0,(min(yr)-1940)),M_B_Bmsy)
|
||||
#del yr1=seq(1940,max(yr))
|
||||
|
||||
yr1=yr #add
|
||||
|
||||
stockInfo <- with(cdat1,cdat1[Stock_ID==stock,1:12])
|
||||
temp=c(startbio[1],startbio[2],finalbio[1],finalbio[2],res,
|
||||
mean(log(r)),sd(log(r)),mean(log(k)),sd(log(k)),mean(log(msy)),
|
||||
sd(log(msy)),sigR,min(yr),max(yr),max(ct),length(r),GM_B_Bmsy,GM_B_BmsySD,M_B_Bmsy)
|
||||
|
||||
#add, adding "GM_B_BmsySD" in the line above
|
||||
|
||||
output=as.data.frame(matrix(temp,nrow=1))
|
||||
output <- cbind(stockInfo,output)
|
||||
names(output) <- c(names(cdat1)[1:12],"startbio[1]","startbio[2]","finalbio[1]","finalbio[2]",
|
||||
"res","mean(log(r))","sd(log(r))","mean(log(k))","sd(log(k))",
|
||||
"mean(log(msy))","sd(log(msy))","sigR","min(yr)","max(yr)","max(ct)",
|
||||
"length(r)",paste("GM_B_msy",yr1,sep="_"),paste("GM_B_msySD",yr1,sep="_"),paste("M_B_Bmsy",yr1,sep="_"))
|
||||
|
||||
#add, adding "paste("GM_B_msySD",yr1,sep="_")"in the line above
|
||||
|
||||
######< Ye
|
||||
########################################################
|
||||
|
||||
## plot MSY over catch data
|
||||
pdf(paste(bb,"graph.pdf",sep="_"))
|
||||
|
||||
par(mfcol=c(2,3))
|
||||
plot(yr, ct, type="l", ylim = c(0, max(ct)), xlab = "Year",
|
||||
ylab = "Catch (1000 t)",main = paste("StockID",stock,sep=":"))
|
||||
abline(h=exp(mean(log(msy))),col="red", lwd=2)
|
||||
abline(h=exp(mean_ln_msy - 2 * sd(log(msy))),col="red")
|
||||
abline(h=exp(mean_ln_msy + 2 * sd(log(msy))),col="red")
|
||||
|
||||
hist(r, freq=F, xlim=c(0, 1.2 * max(r)), main = "")
|
||||
abline(v=exp(mean(log(r))),col="red",lwd=2)
|
||||
abline(v=exp(mean(log(r))-2*sd(log(r))),col="red")
|
||||
abline(v=exp(mean(log(r))+2*sd(log(r))),col="red")
|
||||
|
||||
plot(r1, k1, xlim = start_r, ylim = start_k, xlab="r", ylab="k (1000t)")
|
||||
|
||||
hist(k, freq=F, xlim=c(0, 1.2 * max(k)), xlab="k (1000t)", main = "")
|
||||
abline(v=exp(mean(log(k))),col="red", lwd=2)
|
||||
abline(v=exp(mean(log(k))-2*sd(log(k))),col="red")
|
||||
abline(v=exp(mean(log(k))+2*sd(log(k))),col="red")
|
||||
|
||||
|
||||
plot(log(r), log(k),xlab="ln(r)",ylab="ln(k)")
|
||||
abline(v=mean(log(r)))
|
||||
abline(h=mean(log(k)))
|
||||
abline(mean(log(msy))+log(4),-1, col="red",lwd=2)
|
||||
abline(mean(log(msy))-2*sd(log(msy))+log(4),-1, col="red")
|
||||
abline(mean(log(msy))+2*sd(log(msy))+log(4),-1, col="red")
|
||||
|
||||
hist(msy, freq=F, xlim=c(0, 1.2 * max(msy)), xlab="MSY (1000t)",main = "")
|
||||
abline(v=exp(mean(log(msy))),col="red", lwd=2)
|
||||
abline(v=exp(mean_ln_msy - 2 * sd(log(msy))),col="red")
|
||||
abline(v=exp(mean_ln_msy + 2 * sd(log(msy))),col="red")
|
||||
|
||||
graphics.off()
|
||||
|
||||
|
||||
cat("Possible combinations = ", length(r),"\n")
|
||||
cat("geom. mean r =", format(exp(mean(log(r))),digits=3), "\n")
|
||||
cat("r +/- 2 SD =", format(exp(mean(log(r))-2*sd(log(r))),digits=3),"-",format(exp(mean(log(r))+2*sd(log(r))),digits=3), "\n")
|
||||
cat("geom. mean k =", format(1000*exp(mean(log(k))),digits=3), "\n")
|
||||
cat("k +/- 2 SD =", format(1000*exp(mean(log(k))-2*sd(log(k))),digits=3),"-",format(1000*exp(mean(log(k))+2*sd(log(k))),digits=3), "\n")
|
||||
cat("geom. mean MSY =", format(1000*exp(mean(log(msy))),digits=3),"\n")
|
||||
cat("MSY +/- 2 SD =", format(1000*exp(mean_ln_msy - 2 * sd(log(msy))),digits=3), "-", format(1000*exp(mean_ln_msy + 2 * sd(log(msy))),digits=3), "\n")
|
||||
|
||||
## Write results into outfile, in append mode (no header in file, existing files will be continued)
|
||||
## output = data.frame(stock, sigR, startbio[1], startbio[2], interbio[1], interbio[2], finalbio[1], finalbio[2], min(yr), max(yr), res, max(ct), ct[1], ct[nyr], length(r), exp(mean(log(r))), sd(log(r)), min(r), quantile(r,0.05), quantile(r,0.25), median(r), quantile(r,0.75), quantile(r,0.95), max(r), exp(mean(log(k))), sd(log(k)), min(k), quantile(k, 0.05), quantile(k, 0.25), median(k), quantile(k, 0.75), quantile(k, 0.95), max(k), exp(mean(log(msy))), sd(log(msy)), min(msy), quantile(msy, 0.05), quantile(msy, 0.25), median(msy), quantile(msy, 0.75), quantile(msy, 0.95), max(msy))
|
||||
|
||||
#write.table(output, file = outfile, append = TRUE, sep = ";", dec = ".", row.names = FALSE, col.names = FALSE)
|
||||
appendPar <- ifelse(counter2==0,F,T)
|
||||
colnamePar <- ifelse(counter2==0,T,F)
|
||||
write.table(output, file = outfile, append = appendPar, sep = ",", dec = ".",
|
||||
row.names = FALSE, col.names = colnamePar)
|
||||
|
||||
counter2 <- counter2 + 1
|
||||
|
||||
}
|
||||
cat("Elapsed: ",Sys.time()-t0," \n")
|
||||
} ## End of stock loop, get next stock or exit
|
|
@ -0,0 +1,17 @@
|
|||
<?xml version='1.0' encoding='UTF-8'?>
|
||||
<hibernate-configuration>
|
||||
<session-factory>
|
||||
<property name="connection.driver_class">org.postgresql.Driver</property>
|
||||
<property name="connection.provider_class">org.hibernate.connection.C3P0ConnectionProvider</property>
|
||||
<property name="connection.url">jdbc:postgresql://localhost/testdb</property>
|
||||
<property name="connection.username">gcube</property>
|
||||
<property name="connection.password">d4science2</property>
|
||||
<property name="dialect">org.hibernate.dialect.PostgreSQLDialect</property>
|
||||
<property name="transaction.factory_class">org.hibernate.transaction.JDBCTransactionFactory</property>
|
||||
<property name="c3p0.timeout">0</property>
|
||||
<property name="c3p0.max_size">1</property>
|
||||
<property name="c3p0.max_statements">0</property>
|
||||
<property name="c3p0.min_size">1</property>
|
||||
<property name="current_session_context_class">thread</property>
|
||||
</session-factory>
|
||||
</hibernate-configuration>
|
|
@ -0,0 +1,20 @@
|
|||
<?xml version='1.0' encoding='UTF-8'?>
|
||||
<hibernate-configuration>
|
||||
<session-factory>
|
||||
<property name="connection.driver_class">org.postgresql.Driver</property>
|
||||
<property name="connection.provider_class">org.hibernate.connection.C3P0ConnectionProvider</property>
|
||||
<property name="connection.url">jdbc:postgresql://localhost/testdb</property>
|
||||
<property name="connection.username">gcube</property>
|
||||
<property name="connection.password">d4science2</property>
|
||||
<!-- <property name="dialect">org.hibernatespatial.postgis.PostgisDialect</property>-->
|
||||
<property name="dialect">org.hibernate.dialect.PostgreSQLDialect</property>
|
||||
<property name="transaction.factory_class">org.hibernate.transaction.JDBCTransactionFactory</property>
|
||||
<property name="connection.pool_size">1</property>
|
||||
<property name="c3p0.timeout">0</property>
|
||||
<property name="c3p0.max_size">1</property>
|
||||
<property name="c3p0.max_statements">0</property>
|
||||
<property name="c3p0.min_size">1</property>
|
||||
<property name="c3p0.checkoutTimeout">1</property>
|
||||
<property name="current_session_context_class">thread</property>
|
||||
</session-factory>
|
||||
</hibernate-configuration>
|
File diff suppressed because it is too large
|
@ -0,0 +1,696 @@
|
|||
##--------------------------------------------------------
|
||||
## CMSY analysis with estimation of total biomass, including Bayesian Schaefer
|
||||
## written by Rainer Froese with support from Gianpaolo Coro in 2013-2014
|
||||
## This version adjusts biomass to average biomass over the year
|
||||
## It also contains the FutureCrash option to improve prediction of final biomass
|
||||
## Version 21 adds the purple point to indicate the 25th percentile of final biomass
|
||||
## Version 22 accepts that no biomass or CPUE data are available
|
||||
##--------------------------------------------------------
|
||||
library(R2jags) # Interface with JAGS
|
||||
library(coda)
|
||||
|
||||
#-----------------------------------------
|
||||
# Some general settings
|
||||
#-----------------------------------------
|
||||
# set.seed(999) # use for comparing results between runs
|
||||
rm(list=ls(all=TRUE)) # clear previous variables etc
|
||||
options(digits=3) # displays all numbers with three significant digits as default
|
||||
graphics.off() # close graphics windows from previous sessions
|
||||
|
||||
#-----------------------------------------
|
||||
# General settings for the analysis
|
||||
#-----------------------------------------
|
||||
sigR <- 0.02 # overall process error; 0.05 works reasonably for simulations, 0.02 for real data; 0 for a deterministic model
|
||||
n <- 10000 # initial number of r-k pairs
|
||||
batch.mode <- T # set to TRUE to suppress graphs
|
||||
write.output <- T # set to true if table of output is wanted
|
||||
FutureCrash <- "No"
|
||||
|
||||
#-----------------------------------------
|
||||
# Start output to screen
|
||||
#-----------------------------------------
|
||||
cat("-------------------------------------------\n")
|
||||
cat("Catch-MSY Analysis,", date(),"\n")
|
||||
cat("-------------------------------------------\n")
|
||||
|
||||
#------------------------------------------
|
||||
# Read data and assign to vectors
|
||||
#------------------------------------------
|
||||
# filename_1 <- "AllStocks_Catch4.csv"
|
||||
# filename_2 <- "AllStocks_ID4.csv"
|
||||
# filename_1 <- "SimCatch.csv"
|
||||
# filename_2 <- "SimSpec.csv"
|
||||
# filename_2 <- "SimSpecWrongS.csv"
|
||||
# filename_2 <- "SimSpecWrongI.csv"
|
||||
# filename_2 <- "SimSpecWrongF.csv"
|
||||
# filename_2 <- "SimSpecWrongH.csv"
|
||||
# filename_2 <- "SimSpecWrongL.csv"
|
||||
# filename_1 <- "FishDataLim.csv"
|
||||
# filename_2 <- "FishDataLimSpec.csv"
|
||||
filename_1 <- "WKLIFE4Stocks.csv"
|
||||
filename_2 <- "WKLIFE4ID.csv"
|
||||
|
||||
outfile<-"outfile"
|
||||
outfile.txt <- "outputfile.txt"
|
||||
|
||||
cdat <- read.csv(filename_1, header=T, dec=".", stringsAsFactors = FALSE)
|
||||
cinfo <- read.csv(filename_2, header=T, dec=".", stringsAsFactors = FALSE)
|
||||
cat("Files", filename_1, ",", filename_2, "read successfully","\n")
|
||||
|
||||
# Stocks with total biomass data and catch data from StartYear to EndYear
|
||||
# stocks <- sort(as.character(cinfo$stock)) # All stocks
|
||||
stocks<-"HLH_M07"
|
||||
|
||||
# select one stock after the other
|
||||
for(stock in stocks) {
|
||||
# assign data from cinfo to vectors
|
||||
res <- as.character(cinfo$Resilience[cinfo$stock==stock])
|
||||
StartYear <- as.numeric(cinfo$StartYear[cinfo$stock==stock])
|
||||
EndYear <- as.numeric(cinfo$EndYear[cinfo$stock==stock])
|
||||
r_low <- as.numeric(cinfo$r_low[cinfo$stock==stock])
|
||||
r_hi <- as.numeric(cinfo$r_hi[cinfo$stock==stock])
|
||||
stb_low <- as.numeric(cinfo$stb_low[cinfo$stock==stock])
|
||||
stb_hi <- as.numeric(cinfo$stb_hi[cinfo$stock==stock])
|
||||
intyr <- as.numeric(cinfo$intyr[cinfo$stock==stock])
|
||||
intbio_low <- as.numeric(cinfo$intbio_low[cinfo$stock==stock])
|
||||
intbio_hi <- as.numeric(cinfo$intbio_hi[cinfo$stock==stock])
|
||||
endbio_low <- as.numeric(cinfo$endbio_low[cinfo$stock==stock])
|
||||
endbio_hi <- as.numeric(cinfo$endbio_hi[cinfo$stock==stock])
|
||||
Btype <- as.character(cinfo$Btype[cinfo$stock==stock])
|
||||
FutureCrash <- as.character(cinfo$FutureCrash[cinfo$stock==stock])
|
||||
comment <- as.character(cinfo$comment[cinfo$stock==stock])
|
||||
|
||||
|
||||
# extract data on stock
|
||||
yr <- as.numeric(cdat$yr[cdat$stock==stock & cdat$yr >= StartYear & cdat$yr <= EndYear])
|
||||
ct <- as.numeric(cdat$ct[cdat$stock==stock & cdat$yr >= StartYear & cdat$yr <= EndYear])/1000 ## assumes that catch is given in tonnes, transforms to '000 tonnes
|
||||
if(Btype=="observed" | Btype=="CPUE" | Btype=="simulated") {
|
||||
bt <- as.numeric(cdat$TB[cdat$stock==stock & cdat$yr >= StartYear & cdat$yr <= EndYear])/1000 ## assumes that biomass is in tonnes, transforms to '000 tonnes
|
||||
} else {bt <- NA}
|
||||
nyr <- length(yr) # number of years in the time series
|
||||
|
||||
|
||||
if(Btype!="observed") {bio <- bt}
|
||||
# change biomass to moving average as assumed by Schaefer (but not for simulations or CPUE)
|
||||
# for last year use reported bio
|
||||
if(Btype=="observed") {
|
||||
ma <- function(x){filter(x,rep(1/2,2),sides=2)}
|
||||
bio <- ma(bt)
|
||||
bio[length(bio)] <- bt[length(bt)] }
|
||||
|
||||
# initialize vectors for viable r, k, bt
|
||||
rv.all <- vector()
|
||||
kv.all <- vector()
|
||||
btv.all <- matrix(data=vector(),ncol=nyr+1)
|
||||
|
||||
|
||||
|
||||
#----------------------------------------------------
|
||||
# Determine initial ranges for parameters and biomass
|
||||
#----------------------------------------------------
|
||||
# initial range of r from input file
|
||||
if(is.na(r_low)==F & is.na(r_hi)==F) {
|
||||
start_r <- c(r_low,r_hi)
|
||||
} else {
|
||||
# initial range of r and CatchMult values based on resilience
|
||||
if(res == "High") {
|
||||
start_r <- c(0.6,1.5)} else if(res == "Medium") {
|
||||
start_r <- c(0.2,0.8)} else if(res == "Low") {
|
||||
start_r <- c(0.05,0.5)} else { # i.e. res== "Very low"
|
||||
start_r <- c(0.015,0.1)}
|
||||
}
|
||||
|
||||
|
||||
# initial range of k values, assuming k will always be larger than max catch
|
||||
# and max catch will never be smaller than a quarter of MSY
|
||||
|
||||
start_k <- c(max(ct),16*max(ct)/start_r[1])
|
||||
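# Worked check of the upper k bound (hypothetical values): if max(ct) = 100 and
# start_r[1] = 0.05, the assumption max(ct) >= MSY/4 = r*k/16 implies
# k <= 16*100/0.05 = 32000.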
|
||||
# initial biomass range from input file
|
||||
if(is.na(stb_low)==F & is.na(stb_hi)==F) {
|
||||
startbio <- c(stb_low,stb_hi)
|
||||
} else {
|
||||
# use low biomass at start as default
|
||||
startbio <- c(0.1,0.5)
|
||||
}
|
||||
|
||||
MinYear <- yr[which.min(ct)]
|
||||
MaxYear <- yr[which.max(ct)]
|
||||
# use year and biomass range for intermediate biomass from input file
|
||||
if(is.na(intbio_low)==F & is.na(intbio_hi)==F) {
|
||||
intyr <- intyr
|
||||
intbio <- c(intbio_low,intbio_hi)
|
||||
# else if year of minimum catch is at least 3 years away from StartYear and EndYear of series, use min catch
|
||||
} else if((MinYear - StartYear) > 3 & (EndYear - MinYear) > 3 ) {
|
||||
# assume that biomass range in year before minimum catch was 0.01 - 0.4
|
||||
intyr <- MinYear-1
|
||||
intbio <- c(0.01,0.4)
|
||||
# else if year of max catch is at least 3 years away from StartYear and EndYear of series, use max catch
|
||||
} else if((MaxYear - StartYear) > 3 & (EndYear - MaxYear) > 3 ) {
|
||||
# assume that biomass range in year before maximum catch was 0.3 - 0.9
|
||||
intyr <- MaxYear-1
|
||||
intbio <- c(0.3,0.9)
|
||||
} else {
|
||||
# assume uninformative range 0-1 in mid-year
|
||||
intyr <- as.integer(mean(c(StartYear, EndYear)))
|
||||
intbio <- c(0,1) }
|
||||
# end of intbio setting
|
||||
|
||||
# final biomass range from input file
|
||||
if(is.na(endbio_low)==F & is.na(endbio_hi)==F) {
|
||||
endbio <- c(endbio_low,endbio_hi)
|
||||
} else {
|
||||
# else use Catch/maxCatch to estimate final biomass
|
||||
endbio <- if(ct[nyr]/max(ct) > 0.5) {c(0.4,0.8)} else {c(0.01,0.4)}
|
||||
} # end of final biomass setting
|
||||
|
||||
|
||||
#----------------------------------------------
|
||||
# MC with Schaefer Function filtering
|
||||
#----------------------------------------------
|
||||
Schaefer <- function(ri, ki, startbio, intyr, intbio, endbio, sigR, pt) {
|
||||
|
||||
# if stock is not expected to crash within 3 years if last catch continues
|
||||
if(FutureCrash == "No") {
|
||||
yr.s <- c(yr,EndYear+1,EndYear+2,EndYear+3)
|
||||
ct.s <- c(ct,ct[yr==EndYear],ct[yr==EndYear],ct[yr==EndYear])
|
||||
nyr.s <- length(yr.s)
|
||||
} else{
|
||||
yr.s <- yr
|
||||
ct.s <- ct
|
||||
nyr.s <- nyr
|
||||
}
|
||||
|
||||
# create vector for initial biomasses
|
||||
startbt <- seq(from=startbio[1], to=startbio[2], by=(startbio[2]-startbio[1])/10)
|
||||
# create vectors for viable r, k and bt
|
||||
rv <- array(-1:-1,dim=c(length(ri)*length(startbt))) #initialize array with -1. The -1 remaining after the process will be removed
|
||||
kv <- array(-1:-1,dim=c(length(ri)*length(startbt)))
|
||||
btv <- matrix(data=NA, nrow = (length(ri)*length(startbt)), ncol = nyr+1)
|
||||
intyr.i <- which(yr.s==intyr) # get index of intermediate year
|
||||
|
||||
#loop through r-k pairs
|
||||
npoints = length(ri)
|
||||
nstartb = length(startbt)
|
||||
|
||||
for(i in 1 : npoints) {
|
||||
if (i%%1000==0)
|
||||
cat(".")
|
||||
|
||||
# create empty vector for annual biomasses
|
||||
bt <- vector()
|
||||
|
||||
# loop through range of relative start biomasses
|
||||
for(jj in seq_along(startbt)) {
j <- startbt[jj] # j is the relative start biomass; jj its integer index (indexing rv/kv/btv with the fraction j would truncate to the wrong slot)
|
||||
# set initial biomass, including process error
|
||||
bt[1]=j*ki[i]*exp(rnorm(1,0, sigR)) ## set biomass in first year
|
||||
|
||||
#loop through years in catch time series
|
||||
for(t in 1:nyr.s) { # for all years in the time series
|
||||
xt=rnorm(1,0, sigR) # set new random process error for every year
|
||||
|
||||
# calculate biomass as function of previous year's biomass plus surplus production minus catch
|
||||
bt[t+1]=(bt[t]+ri[i]*bt[t]*(1-bt[t]/ki[i])-ct.s[t])*exp(xt)
|
||||
|
||||
# if biomass < 0.01 k or > 1.1 k, discard r-k pair
|
||||
if(bt[t+1] < 0.01*ki[i] || bt[t+1] > 1.1*ki[i]) { break } # stop looping through years, go to next upper level
|
||||
|
||||
if ((t+1)==intyr.i && (bt[t+1]>(intbio[2]*ki[i]) || bt[t+1]<(intbio[1]*ki[i]))) { break } #intermediate year check
|
||||
|
||||
} # end of loop of years
|
||||
|
||||
# if last biomass falls outside the expected range, go to the next r-k pair
|
||||
if(t < nyr.s || bt[yr.s==EndYear] > (endbio[2]*ki[i]) || bt[yr.s==EndYear] < (endbio[1]*ki[i])) {
|
||||
next } else {
|
||||
# store r, k, and bt, plot point, then go to next startbt
|
||||
rv[((i-1)*nstartb)+jj] <- ri[i]
|
||||
kv[((i-1)*nstartb)+jj] <- ki[i]
|
||||
btv[((i-1)*nstartb)+jj,] <- bt[1:(nyr+1)]/ki[i] #substitute a row into the matrix, exclude FutureCrash years
|
||||
if(pt==T) {points(x=ri[i], y=ki[i], pch=".", cex=2, col="black")
|
||||
next }
|
||||
}
|
||||
} # end of loop of initial biomasses
|
||||
} # end of loop of r-k pairs
|
||||
|
||||
rv=rv[rv!=-1]
|
||||
kv=kv[kv!=-1]
|
||||
btv=na.omit(btv) # drop rows that were never filled (still all NA)
|
||||
|
||||
cat("\n")
|
||||
return(list(rv, kv,btv))
|
||||
} # end of Schaefer function
|
||||
|
||||
#------------------------------------------------------------------
|
||||
# Uniform sampling of the r-k space
|
||||
#------------------------------------------------------------------
|
||||
# get random set of r and k from log space distribution
|
||||
ri1 = exp(runif(n, log(start_r[1]), log(start_r[2])))
|
||||
ki1 = exp(runif(n, log(start_k[1]), log(start_k[2])))
|
||||
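# Sampling uniformly in log space spreads points evenly across orders of
# magnitude: e.g. with r bounds (0.05, 0.5), about half of the draws fall below
# the geometric mid-point sqrt(0.05*0.5), roughly 0.158.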
|
||||
#-----------------------------------------------------------------
|
||||
# Plot data and progress
|
||||
#-----------------------------------------------------------------
|
||||
#windows(14,9)
|
||||
par(mfcol=c(2,3))
|
||||
# plot catch
|
||||
plot(x=yr, y=ct, ylim=c(0,1.2*max(ct)), type ="l", bty="l", main=paste(stock,"catch"), xlab="Year",
|
||||
ylab="Catch", lwd=2)
|
||||
points(x=yr[which.max(ct)], y=max(ct), col="red", lwd=2)
|
||||
points(x=yr[which.min(ct)], y=min(ct), col="red", lwd=2)
|
||||
|
||||
# plot r-k graph
|
||||
plot(ri1, ki1, xlim = start_r, ylim = start_k, log="xy", xlab="r", ylab="k", main="Finding viable r-k", pch=".", cex=2, bty="l", col="lightgrey")
|
||||
|
||||
#1 - Call MC-Schaefer function to preliminary explore the space without prior information
|
||||
cat(stock, ": First Monte Carlo filtering of r-k space with ",n," points\n")
|
||||
MCA <- Schaefer(ri=ri1, ki=ki1, startbio=startbio, intyr=intyr, intbio=intbio, endbio=endbio, sigR=sigR, pt=T)
|
||||
rv.all <- append(rv.all,MCA[[1]])
|
||||
kv.all <- append(kv.all,MCA[[2]])
|
||||
btv.all <- rbind(btv.all,MCA[[3]])
|
||||
#take viable r and k values
|
||||
nviablepoints = length(rv.all)
|
||||
cat("* Found ",nviablepoints," viable points from ",n," samples\n");
|
||||
|
||||
|
||||
#if few points were found then resample and shrink the k log space
|
||||
if (nviablepoints<=1000){
|
||||
log.start_k.new <- log(start_k)
|
||||
max_attempts = 3
|
||||
current_attempts = 1
|
||||
while (nviablepoints<=1000 && current_attempts<=max_attempts){
|
||||
if(nviablepoints > 0) {
|
||||
log.start_k.new[1] <- mean(c(log.start_k.new[1], min(log(kv.all))))
|
||||
log.start_k.new[2] <- mean(c(log.start_k.new[2], max(log(kv.all)))) }
|
||||
n.new=n*current_attempts #add more points
|
||||
ri1 = exp(runif(n.new, log(start_r[1]), log(start_r[2])))
|
||||
ki1 = exp(runif(n.new, log.start_k.new[1], log.start_k.new[2]))
|
||||
cat("Shrinking k space: repeating Monte Carlo in the interval [",exp(log.start_k.new[1]),",",exp(log.start_k.new[2]),"]\n")
|
||||
cat("Attempt ",current_attempts," of ",max_attempts," with ",n.new," points","\n")
|
||||
MCA <- Schaefer(ri=ri1, ki=ki1, startbio=startbio, intyr=intyr, intbio=intbio, endbio=endbio, sigR=sigR, pt=T)
|
||||
rv.all <- append(rv.all,MCA[[1]])
|
||||
kv.all <- append(kv.all,MCA[[2]])
|
||||
btv.all <- rbind(btv.all,MCA[[3]])
|
||||
nviablepoints = length(rv.all) #recalculate viable points
|
||||
cat("* Found altogether",nviablepoints," viable points \n");
|
||||
current_attempts=current_attempts+1 #increment the number of attempts
|
||||
}
|
||||
}
|
||||
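# Shrinking example (hypothetical numbers): with current k bounds (100, 10000)
# and viable k spanning (400, 2500), the new bounds are the log mid-points
# exp(mean(log(c(100,400)))) = 200 and exp(mean(log(c(10000,2500)))) = 5000.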
|
||||
# If tip of viable r-k pairs is 'thin', do extra sampling there
|
||||
gm.rv = exp(mean(log(rv.all)))
|
||||
if(length(rv.all[rv.all > 0.9*start_r[2]]) < 10) {
|
||||
l.sample.r <- (gm.rv + max(rv.all))/2
|
||||
cat("Final sampling in the tip area above r =",l.sample.r,"\n")
|
||||
log.start_k.new <- c(log(0.8*min(kv.all)),log(max(kv.all[rv.all > l.sample.r])))
|
||||
ri1 = exp(runif(50000, log(l.sample.r), log(start_r[2])))
|
||||
ki1 = exp(runif(50000, log.start_k.new[1], log.start_k.new[2]))
|
||||
MCA <- Schaefer(ri=ri1, ki=ki1, startbio=startbio, intyr=intyr, intbio=intbio, endbio=endbio, sigR=sigR, pt=T)
|
||||
rv.all <- append(rv.all,MCA[[1]])
|
||||
kv.all <- append(kv.all,MCA[[2]])
|
||||
btv.all <- rbind(btv.all,MCA[[3]])
|
||||
nviablepoints = length(rv.all) #recalculate viable points
|
||||
cat("Found altogether", length(rv.all), "unique viable r-k pairs and biomass trajectories\n")
|
||||
}
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Bayesian analysis of catch & biomass with Schaefer model
|
||||
# ------------------------------------------------------------
|
||||
if(Btype == "observed" | Btype=="simulated") {
|
||||
cat("Running Schaefer MCMC analysis....\n")
|
||||
mcmc.burn <- as.integer(30000)
|
||||
mcmc.chainLength <- as.integer(60000) # burn-in plus post-burn
|
||||
mcmc.thin = 10 # to reduce autocorrelation
|
||||
mcmc.chains = 3 # needs to be at least 2 for DIC
  # Parameters to be returned by JAGS
  jags.save.params=c('r','k','sigma.b', 'alpha', 'sigma.r')

  # JAGS model
  Model = "model{
  # to avoid crash due to 0 values
  eps<-0.01
  # set a quite narrow variation around the expected value
  sigma.b <- 1/16
  tau.b <- pow(sigma.b,-2)

  Bm[1] <- log(alpha*k)
  bio[1] ~ dlnorm(Bm[1],tau.b)

  for (t in 2:nyr){
    bio[t] ~ dlnorm(Bm[t],tau.b)
    Bm[t] <- log(max(bio[t-1] + r*bio[t-1]*(1 - (bio[t-1])/k) - ct[t-1], eps))
  }

  # priors
  alpha ~ dunif(0.01,1) # needed for fit of first biomass
  # inverse cube-root relationship between the range of viable r and the size
  # of the search space (note the parentheses: x^(1/3), not (x^1)/3)
  inverseRangeFactor <- 1/((start_r[2]-start_r[1])^(1/3))

  # give sigma some variability in the inverse relationship
  sigma.r ~ dunif(0.001*inverseRangeFactor,0.02*inverseRangeFactor)
  tau.r <- pow(sigma.r,-2)
  rm <- log((start_r[1]+start_r[2])/2)
  r ~ dlnorm(rm,tau.r)

  # search the k space from the center of the range; allow high variability
  km <- log((start_k[1]+start_k[2])/2)
  tau.k <- pow(km,-2)
  k ~ dlnorm(km,tau.k)

  # end model
  }"
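  # Aside (editor's illustration): the prior on r above is lognormal, centred
  # on the midpoint of the prior r range. What it implies can be previewed in
  # plain R (rlnorm takes the SD of the log, i.e. sigma.r, not the precision
  # tau.r); the start_r and sigma.r values below are made up for illustration:
  if (FALSE) {
    start_r <- c(0.2, 1)                       # hypothetical prior range
    sigma.r <- 0.02 / (diff(start_r)^(1/3))    # upper end of the dunif prior
    r.draws <- rlnorm(10000, meanlog = log(mean(start_r)), sdlog = sigma.r)
    quantile(r.draws, c(0.025, 0.5, 0.975))    # tightly clustered around 0.6
  }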
  # Write JAGS model to file
  cat(Model, file="r2jags.bug")

  ### random seed
  set.seed(runif(1,1,500)) # needed in JAGS

  ### run model
  jags_outputs <- jags(data=c('ct','bio','nyr', 'start_r', 'start_k'),
                       working.directory=NULL, inits=NULL,
                       parameters.to.save= jags.save.params,
                       model.file="r2jags.bug", n.chains = mcmc.chains,
                       n.burnin = mcmc.burn, n.thin = mcmc.thin, n.iter = mcmc.chainLength,
                       refresh=mcmc.burn/20)

  # ------------------------------------------------------
  # Results from JAGS Schaefer
  # ------------------------------------------------------
  r_out     <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$r))
  k_out     <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$k))
  ## sigma_out   <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$sigma.b))
  alpha_out <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$alpha))
  ## sigma.r_out <- as.numeric(mcmc(jags_outputs$BUGSoutput$sims.list$sigma.r))

  mean.log.r.jags <- mean(log(r_out))
  SD.log.r.jags   <- sd(log(r_out))
  lcl.log.r.jags  <- mean.log.r.jags-1.96*SD.log.r.jags
  ucl.log.r.jags  <- mean.log.r.jags+1.96*SD.log.r.jags
  gm.r.jags  <- exp(mean.log.r.jags)
  lcl.r.jags <- exp(lcl.log.r.jags)
  ucl.r.jags <- exp(ucl.log.r.jags)
  mean.log.k.jags <- mean(log(k_out))
  SD.log.k.jags   <- sd(log(k_out))
  lcl.log.k.jags  <- mean.log.k.jags-1.96*SD.log.k.jags
  ucl.log.k.jags  <- mean.log.k.jags+1.96*SD.log.k.jags
  gm.k.jags  <- exp(mean.log.k.jags)
  lcl.k.jags <- exp(lcl.log.k.jags)
  ucl.k.jags <- exp(ucl.log.k.jags)
  mean.log.MSY.jags <- mean(log(r_out)+log(k_out)-log(4))
  SD.log.MSY.jags   <- sd(log(r_out)+log(k_out)-log(4))
  gm.MSY.jags  <- exp(mean.log.MSY.jags)
  lcl.MSY.jags <- exp(mean.log.MSY.jags-1.96*SD.log.MSY.jags)
  ucl.MSY.jags <- exp(mean.log.MSY.jags+1.96*SD.log.MSY.jags)

} # end of MCMC Schaefer block

#------------------------------------
# get results from CMSY
#------------------------------------
# get estimate of most probable r as median of mid log.r-classes above the cut-off
# get remaining viable log.r and log.k
rem.log.r <- log(rv.all[rv.all > gm.rv])
rem.log.k <- log(kv.all[rv.all > gm.rv])
# get vectors with counts of r and mid values in about 25 classes
hist.log.r   <- hist(x=rem.log.r, breaks=25, plot=F)
log.r.counts <- hist.log.r$counts
log.r.mids   <- hist.log.r$mids
# get most probable log.r as median of mids with counts > 0
log.r.est <- median(log.r.mids[which(log.r.counts > 0)])
lcl.log.r <- as.numeric(quantile(x=log.r.mids[which(log.r.counts > 0)], 0.025))
ucl.log.r <- as.numeric(quantile(x=log.r.mids[which(log.r.counts > 0)], 0.975))
r.est     <- exp(log.r.est)
lcl.r.est <- exp(lcl.log.r)
ucl.r.est <- exp(ucl.log.r)
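# Aside (editor's illustration): hist(..., plot=F) above is used purely as a
# binning device; $mids are the bin centres and $counts the bin occupancies,
# and the r estimate is the median of the occupied bin centres. A tiny
# self-contained sketch of that trick, never executed here:
if (FALSE) {
  x <- rnorm(500)
  h <- hist(x, breaks=25, plot=FALSE)
  median(h$mids[which(h$counts > 0)])  # midpoint of the occupied range, ~0
}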
# do linear regression of log k ~ log r with slope fixed to -1 (from Schaefer: MSY = r*k/4)
reg     <- lm(rem.log.k ~ 1 + offset(-1*rem.log.r))
int.reg <- as.numeric(coef(reg)[1])
sd.reg  <- sd(resid(reg))
se.reg  <- summary(reg)$coefficients[2]
# get estimate of log(k) from y where x = log.r.est
log.k.est <- int.reg + (-1) * log.r.est
# get estimates of CL of log.k.est from y +/- SD where x = lcl.log.r or ucl.log.r
lcl.log.k <- int.reg + (-1) * ucl.log.r - sd.reg
ucl.log.k <- int.reg + (-1) * lcl.log.r + sd.reg
k.est     <- exp(log.k.est)
lcl.k.est <- exp(lcl.log.k)
ucl.k.est <- exp(ucl.log.k)
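# Aside (editor's note): the fixed slope of -1 follows from the Schaefer
# surplus-production result MSY = r*k/4, i.e. log(k) = log(4*MSY) - log(r),
# so along a ridge of constant MSY, log k falls one-for-one with log r.
# A minimal sketch of the same offset() trick on synthetic data:
if (FALSE) {
  set.seed(42)
  log.r <- runif(100, log(0.2), log(1))
  log.k <- log(4*1000) - log.r + rnorm(100, 0, 0.1)  # true MSY = 1000
  fit <- lm(log.k ~ 1 + offset(-1*log.r))            # slope pinned at -1
  exp(coef(fit)[1])/4                                # recovers MSY ~ 1000
}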
# get MSY from remaining log r-k pairs
log.MSY.est     <- mean(rem.log.r + rem.log.k - log(4))
sd.log.MSY.est  <- sd(rem.log.r + rem.log.k - log(4))
lcl.log.MSY.est <- log.MSY.est - 1.96*sd.log.MSY.est
ucl.log.MSY.est <- log.MSY.est + 1.96*sd.log.MSY.est
MSY.est     <- exp(log.MSY.est)
lcl.MSY.est <- exp(lcl.log.MSY.est)
ucl.MSY.est <- exp(ucl.log.MSY.est)

# get predicted biomass vectors as median and quantiles of trajectories
median.btv <- apply(btv.all,2, median)
lastyr.bio <- median.btv[length(median.btv)-1]
nextyr.bio <- median.btv[length(median.btv)]
lcl.btv <- apply(btv.all,2, quantile, probs=0.025)
q.btv   <- apply(btv.all,2, quantile, probs=0.25)
ucl.btv <- apply(btv.all,2, quantile, probs=0.975)
lcl.lastyr.bio <- lcl.btv[length(lcl.btv)-1]
ucl.lastyr.bio <- ucl.btv[length(ucl.btv)-1]
lcl.nextyr.bio <- lcl.btv[length(lcl.btv)]
ucl.nextyr.bio <- ucl.btv[length(ucl.btv)]

# -----------------------------------------
# Plot results
# -----------------------------------------
# Analysis of viable r-k pairs
plot(x=rv.all, y=kv.all, xlim=start_r,
     ylim=c(0.9*min(kv.all, ifelse(Btype == "observed",k_out,NA), na.rm=T), 1.1*max(kv.all)),
     pch=16, col="grey", log="xy", bty="l",
     xlab="r", ylab="k", main="Analysis of viable r-k")
abline(v=gm.rv, lty="dashed")

# plot points and best estimate from full Schaefer analysis
if(Btype == "observed" | Btype=="simulated") {
  # plot r-k pairs from MCMC
  points(x=r_out, y=k_out, pch=16, cex=0.5)
  # plot best r-k pair from MCMC
  points(x=gm.r.jags, y=gm.k.jags, pch=19, col="green")
  lines(x=c(lcl.r.jags, ucl.r.jags), y=c(gm.k.jags,gm.k.jags), col="green")
  lines(x=c(gm.r.jags,gm.r.jags), y=c(lcl.k.jags, ucl.k.jags), col="green")
}

# if data are from simulation, plot true r and k
if(Btype=="simulated") {
  l.stock <- nchar(stock)                    # get length of sim stock name
  r.char  <- substr(stock,l.stock-1,l.stock) # get last two characters of sim stock name
  r.sim   <- NA                              # initialize r used in simulation
  if(r.char=="_H") {r.sim=1;   lcl.r.sim=0.8; ucl.r.sim=1.25} else
  if(r.char=="_M") {r.sim=0.5; lcl.r.sim=0.4; ucl.r.sim=0.62} else
  if(r.char=="_L") {r.sim=0.25;lcl.r.sim=0.2; ucl.r.sim=0.31} else {r.sim=0.05;lcl.r.sim=0.04;ucl.r.sim=0.062}
  # plot true r-k point with error bars
  points(x=r.sim, y=1000, pch=19, col="red")
  # add +/- 20% error bars
  lines(x=c(lcl.r.sim,ucl.r.sim), y=c(1000,1000), col="red")
  lines(x=c(r.sim,r.sim), y=c(800,1250), col="red")
}

# plot blue dot for proposed r-k, with 95% CL lines
points(x=r.est, y=k.est, pch=19, col="blue")
lines(x=c(lcl.r.est, ucl.r.est), y=c(k.est,k.est), col="blue")
lines(x=c(r.est,r.est), y=c(lcl.k.est, ucl.k.est), col="blue")

# plot biomass graph
# determine k to use for red line in b/k plot
if(Btype=="simulated") {k2use <- 1000} else
if(Btype == "observed") {k2use <- gm.k.jags} else {k2use <- k.est}
# determine height of y-axis in plot
max.y <- max(c(bio/k2use, ucl.btv, 0.6, startbio[2], intbio[2], endbio[2]), na.rm=T)

plot(x=yr, y=median.btv[1:nyr], lwd=2, xlab="Year", ylab="Relative biomass b/k", type="l",
     ylim=c(0,max.y), bty="l", main=paste("Pred. biomass vs ", Btype, sep=""))
lines(x=yr, y=lcl.btv[1:nyr], type="l")
lines(x=yr, y=ucl.btv[1:nyr], type="l")
points(x=EndYear, y=q.btv[yr==EndYear], col="purple", cex=1.5, lwd=2)
abline(h=0.5, lty="dashed")
abline(h=0.25, lty="dotted")
lines(x=c(yr[1],yr[1]), y=startbio, col="blue")
lines(x=c(intyr,intyr), y=intbio, col="blue")
lines(x=c(max(yr),max(yr)), y=endbio, col="blue")

# if observed biomass is available, plot red biomass line
if(Btype == "observed" | Btype=="simulated") {
  lines(x=yr, y=bio/k2use, type="l", col="red", lwd=1)
}

# if CPUE data are available, scale to predicted biomass range, plot red biomass line
if(Btype == "CPUE") {
  par(new=T) # prepare for new plot on top of previous
  plot(x=yr, y=bio, type="l", col="red", lwd=1,
       ann=F, axes=F, ylim=c(0,1.2*max(bio, na.rm=T))) # forces this plot on top of the previous one
  axis(4, col="red", col.axis="red")
}

# plot yield and biomass against equilibrium surplus parabola
max.y <- max(c(ct/MSY.est, ifelse(Btype=="observed"|Btype=="simulated", ct/gm.MSY.jags, NA), 1.2), na.rm=T)
# plot parabola
x=seq(from=0, to=2, by=0.001)
y=4*x-(2*x)^2
plot(x=x, y=y, xlim=c(0,1), ylim=c(0,max.y), type="l", bty="l", xlab="Relative biomass b/k",
     ylab="Catch / MSY", main="Equilibrium curve")
# plot catch against CMSY biomass estimates
points(x=median.btv[1:nyr], y=ct/MSY.est, pch=16, col="grey")
points(x=q.btv[yr==EndYear], y=ct[yr==EndYear]/MSY.est, col="purple", cex=1.5, lwd=2)
# plot catch against observed biomass or CPUE
if(Btype == "observed" | Btype=="simulated") {
  points(x=bio/k2use, y=ct/gm.MSY.jags, pch=16, cex=0.5)
}

# plot exploitation rate u against u.msy
# get u derived from predicted CMSY biomass
u.CMSY <- ct/(median.btv[1:nyr]*k.est)
u.msy.CMSY <- 1-exp(-r.est/2) # Fmsy from CMSY expressed as exploitation rate
# get u from observed or simulated biomass
if(Btype == "observed" | Btype=="simulated") {
  u.bio <- ct/bio
  u.msy.bio <- 1-exp(-gm.r.jags/2)
}
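# Aside (editor's note): the conversion above uses Fmsy = r/2 for the Schaefer
# model and turns that instantaneous rate into an annual exploitation rate via
# u = 1 - exp(-F). A quick numeric check, never executed here:
if (FALSE) {
  r <- 0.5
  1 - exp(-r/2)  # ~0.221, i.e. ~22% of biomass removable per year at MSY
}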
# get u from CPUE
if(Btype == "CPUE") {
  q = max(median.btv[1:nyr][is.na(bio)==F], na.rm=T)*k.est/max(bio, na.rm=T)
  u.CPUE <- ct/(q*bio)
}
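# Aside (editor's note): the scaler q above maps CPUE to absolute biomass by
# matching the peaks of the two series (B ~ q*CPUE, so u = ct/(q*CPUE) = ct/B).
# Note this is the inverse of the usual catchability convention CPUE = q*B.
# A toy check with made-up numbers, never executed here:
if (FALSE) {
  B    <- c(900, 700, 500, 400)  # hypothetical biomass
  cpue <- B/3                    # proportional index, unknown scale
  q    <- max(B)/max(cpue)       # recovers the factor 3
  all.equal(q*cpue, B)           # TRUE
}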
# determine upper bound of y-axis
max.y <- max(c(1.5, 1.2*u.CMSY/u.msy.CMSY, ct[yr==EndYear]/(q.btv[yr==EndYear]*k.est)/u.msy.CMSY,
               ifelse(Btype=="observed"|Btype=="simulated", max(u.bio[is.na(u.bio)==F]/u.msy.bio), 0)),
             na.rm=T)
# plot u from CMSY
plot(x=yr, y=u.CMSY/u.msy.CMSY, type="l", bty="l", ylim=c(0,max.y), xlab="Year",
     ylab="u / u_msy", main="Exploitation rate")
abline(h=1, lty="dashed")
points(x=EndYear, y=ct[yr==EndYear]/(q.btv[yr==EndYear]*k.est)/u.msy.CMSY, col="purple", cex=1.5, lwd=2)
# plot u from biomass
if(Btype == "observed" | Btype=="simulated") lines(x=yr, y=u.bio/u.msy.bio, col="red")
# plot u from CPUE
if(Btype == "CPUE") {
  par(new=T) # prepare for new plot on top of previous
  plot(x=yr, y=u.CPUE, type="l", col="red", ylim=c(0, 1.2*max(u.CPUE, na.rm=T)), ann=F, axes=F)
  axis(4, col="red", col.axis="red")
}
if(batch.mode == TRUE) {dev.off()} # close plot window

# ------------------------------------------
# print input and results to screen
cat("---------------------------------------\n")

cat("Species:", cinfo$ScientificName[cinfo$stock==stock], "\n")
cat("Name and region:", cinfo$EnglishName[cinfo$stock==stock], ",", cinfo$Name[cinfo$stock==stock], "\n")
cat("Stock:", stock, "\n")
cat("Catch data used from years", min(yr), "-", max(yr), "\n")
cat("Prior initial relative biomass =", startbio[1], "-", startbio[2], "\n")
cat("Prior intermediate rel. biomass=", intbio[1], "-", intbio[2], "in year", intyr, "\n")
cat("Prior final relative biomass =", endbio[1], "-", endbio[2], "\n")
cat("If current catches continue, is the stock likely to crash within 3 years?", FutureCrash, "\n")
cat("Prior range for r =", format(start_r[1],digits=2), "-", format(start_r[2],digits=2),
    ", prior range for k =", start_k[1], "-", start_k[2], "\n")

# if data are simulated, print true r-k
if(filename_1=="SimCatch.csv") {
  cat("True r =", r.sim, "(because input data were simulated with the Schaefer model)\n")
  cat("True k = 1000 \n")
  cat("True MSY =", 1000*r.sim/4, "\n")
  cat("True biomass in last year =", bio[length(bio)], "or", bio[length(bio)]/1000, "k \n")
  cat("True mean catch / MSY ratio =", mean(ct)/(1000*r.sim/4), "\n")
}
# print results from full Schaefer if available
if(Btype == "observed" | Btype=="simulated") {
  cat("Results from Bayesian Schaefer model using catch & biomass (",Btype,")\n")
  cat("MSY =", gm.MSY.jags, ", 95% CL =", lcl.MSY.jags, "-", ucl.MSY.jags, "\n")
  cat("Mean catch / MSY =", mean(ct)/gm.MSY.jags, "\n")
  if(Btype != "CPUE") {
    cat("r =", gm.r.jags, ", 95% CL =", lcl.r.jags, "-", ucl.r.jags, "\n")
    cat("k =", gm.k.jags, ", 95% CL =", lcl.k.jags, "-", ucl.k.jags, "\n")
  }
}
# results of CMSY analysis
cat("Results of CMSY analysis \n")
cat("Altogether", nviablepoints, "unique viable r-k pairs were found \n")
cat(length(rem.log.r), "r-k pairs above the initial geometric mean of r =", gm.rv, "were analysed\n")
cat("r =", r.est, ", 95% CL =", lcl.r.est, "-", ucl.r.est, "\n")
cat("k =", k.est, ", 95% CL =", lcl.k.est, "-", ucl.k.est, "\n")
cat("MSY =", MSY.est, ", 95% CL =", lcl.MSY.est, "-", ucl.MSY.est, "\n")
cat("Predicted biomass in last year =", lastyr.bio, "2.5th perc =", lcl.lastyr.bio,
    "97.5th perc =", ucl.lastyr.bio, "\n")
cat("Predicted biomass in next year =", nextyr.bio, "2.5th perc =", lcl.nextyr.bio,
    "97.5th perc =", ucl.nextyr.bio, "\n")
cat("----------------------------------------------------------\n")

## Write some results into outfile
if(write.output == TRUE) {
  # write data into csv file
  output = data.frame(cinfo$ScientificName[cinfo$stock==stock], stock, StartYear, EndYear, mean(ct)*1000,
                      ifelse(Btype=="observed"|Btype=="simulated", bio[length(bio)], NA), # last biomass on record
                      ifelse(Btype=="observed"|Btype=="simulated", gm.MSY.jags, NA),      # full Schaefer
                      ifelse(Btype=="observed"|Btype=="simulated", lcl.MSY.jags, NA),
                      ifelse(Btype=="observed"|Btype=="simulated", ucl.MSY.jags, NA),
                      ifelse(Btype=="observed"|Btype=="simulated", gm.r.jags, NA),
                      ifelse(Btype=="observed"|Btype=="simulated", lcl.r.jags, NA),
                      ifelse(Btype=="observed"|Btype=="simulated", ucl.r.jags, NA),
                      ifelse(Btype=="observed"|Btype=="simulated", gm.k.jags, NA),
                      ifelse(Btype=="observed"|Btype=="simulated", lcl.k.jags, NA),
                      ifelse(Btype=="observed"|Btype=="simulated", ucl.k.jags, NA),
                      r.est, lcl.r.est, ucl.r.est,                # CMSY r
                      k.est, lcl.k.est, ucl.k.est,                # CMSY k
                      MSY.est, lcl.MSY.est, ucl.MSY.est,          # CMSY MSY
                      lastyr.bio, lcl.lastyr.bio, ucl.lastyr.bio, # last year bio
                      nextyr.bio, lcl.nextyr.bio, ucl.nextyr.bio) # last year + 1 bio

  write.table(output, file=outfile, append = T, sep = ",",
              dec = ".", row.names = FALSE, col.names = FALSE)

  # write some text into outfile.txt
  cat("Species:", cinfo$ScientificName[cinfo$stock==stock], "\n",
      "Name:", cinfo$EnglishName[cinfo$stock==stock], "\n",
      "Region:", cinfo$Name[cinfo$stock==stock], "\n",
      "Stock:", stock, "\n",
      "Catch data used from years", min(yr), "-", max(yr), ", biomass =", Btype, "\n",
      "Prior initial relative biomass =", startbio[1], "-", startbio[2], "\n",
      "Prior intermediate rel. biomass=", intbio[1], "-", intbio[2], "in year", intyr, "\n",
      "Prior final relative biomass =", endbio[1], "-", endbio[2], "\n",
      "Future crash with current catches?", FutureCrash, "\n",
      "Prior range for r =", format(start_r[1],digits=2), "-", format(start_r[2],digits=2),
      ", prior range for k =", start_k[1], "-", start_k[2], "\n",
      file=outfile.txt, append=T)

  if(filename_1=="SimCatch.csv") {
    cat(" True r =", r.sim, "(because input data were simulated with the Schaefer model)\n",
        "True k = 1000, true MSY =", 1000*r.sim/4, "\n",
        "True biomass in last year =", bio[length(bio)], "or", bio[length(bio)]/1000, "k \n",
        "True mean catch / MSY ratio =", mean(ct)/(1000*r.sim/4), "\n",
        file=outfile.txt, append=T)
  }
  if(Btype == "observed" | Btype=="simulated") {
    cat(" Results from Bayesian Schaefer model using catch & biomass \n",
        "r =", gm.r.jags, ", 95% CL =", lcl.r.jags, "-", ucl.r.jags, "\n",
        "k =", gm.k.jags, ", 95% CL =", lcl.k.jags, "-", ucl.k.jags, "\n",
        "MSY =", gm.MSY.jags, ", 95% CL =", lcl.MSY.jags, "-", ucl.MSY.jags, "\n",
        "Mean catch / MSY =", mean(ct)/gm.MSY.jags, "\n",
        file=outfile.txt, append=T)
  }
  cat(" Results of CMSY analysis with altogether", nviablepoints, "unique viable r-k pairs \n",
      length(rem.log.r), "r-k pairs above the initial geometric mean of r =", gm.rv, "were analysed\n",
      "r =", r.est, ", 95% CL =", lcl.r.est, "-", ucl.r.est, "\n",
      "k =", k.est, ", 95% CL =", lcl.k.est, "-", ucl.k.est, "\n",
      "MSY =", MSY.est, ", 95% CL =", lcl.MSY.est, "-", ucl.MSY.est, "\n",
      "Predicted biomass last year b/k =", lastyr.bio, "2.5th perc b/k =", lcl.lastyr.bio,
      "97.5th perc b/k =", ucl.lastyr.bio, "\n",
      "Precautionary 25th percentile b/k =", q.btv[yr==EndYear], "\n",
      "----------------------------------------------------------\n",
      file=outfile.txt, append=T)

}

} # end of stocks loop
@ -0,0 +1,435 @@
set.seed(999) ## for a reproducible random sequence
#require(hacks)
#13/05/2015
#setwd("C:/Users/Ye/Documents/Data poor fisheries/Martell Froese Method/")

## Read data for stock, year=yr, catch=ct, and resilience=res. Expects a space-delimited
## file with header yr ct, years as integers and catch as reals with a decimal point.
## For example
## stock res yr ct
## cap-icel Medium 1984 1234.32

## filename <- "RAM_MSY.csv"
##filename <- "ICESct2.csv"

cat("Step 1","\n")
TestRUN <- F # if TRUE, run only on the test samples; FALSE does a formal (full) run

filename <- "D20.csv"
outfile  <- "CatchMSY_Output.csv"
outfile2 <- "NonProcessedSpecies.csv"

#cdat <- read.csv2(filename, header=T, dec=".")
cdat1 <- read.csv(filename)
cat("\n", "File", filename, "read successfully","\n")

cat("Step 2","\n")
if(file.exists("cdat.RData"))
{load("cdat.RData")} else
{
  dim(cdat1)
  yrs=1950:2013

  # set NA as 0
  cdat1[is.na(cdat1)] <- 0
  nrow     <- length(cdat1[,1])
  ndatColn <- length(cdat1[1,c(-1:-12)])
  rownames(cdat1) <- NULL

  cdat <- NULL

  for(i in 1:nrow)
  #for(i in 1:5)
  { #i=1
    #a <- ctotal3[i,-1]
    tmp=data.frame(stock=rep(as.character(cdat1[i,"Stock_ID"]),ndatColn),
                   species=rep(as.character(cdat1[i,"Scientific_name"]),ndatColn),
                   yr=yrs, ct=unlist(c(cdat1[i,-c(1:12)])),
                   res=rep(cdat1[i,"ResilienceIndex"],ndatColn))
    cdat <- rbind(cdat,tmp)
    #edit(cdat)
  }
  save(cdat,file="cdat.RData")
}

StockList=unique(as.character(cdat$stock))

cat("Step 3","\n")
## FUNCTIONS used subsequently
.schaefer <- function(theta)
{
  with(as.list(theta), { ## for all combinations of ri & ki
    bt=vector()
    ell = 0 ## initialize ell
    J=0 #Ye
    for (j in startbt)
    {
      if(ell == 0)
      {
        bt[1]=j*k*exp(rnorm(1,0, sigR)) ## set biomass in first year
        for(i in 1:nyr) ## for all years in the time series
        {
          xt=rnorm(1,0, sigR)
          bt[i+1]=(bt[i]+r*bt[i]*(1-bt[i]/k)-ct[i])*exp(xt)
          ## calculate biomass as previous year's biomass plus net production minus catch
        }

        # Bernoulli likelihood: assign 0 or 1 to each combination of r and k
        ell = 0
        if(bt[nyr+1]/k>=lam1 && bt[nyr+1]/k <=lam2 && min(bt) > 0 && max(bt) <=k && bt[which(yr==interyr)]/k>=interbio[1] && bt[which(yr==interyr)]/k<=interbio[2])
          ell = 1
        J=j # Ye
      }
    }
    return(list(ell=ell,J=J)) # Ye adding J=J
  })
}

sraMSY <- function(theta, N)
{
  # This function conducts the stock reduction
  # analysis for N trials
  # args:
  #   theta - a list object containing:
  #     r (lower and upper bounds for r)
  #     k (lower and upper bounds for k)
  #     lambda (limits for current depletion)

  with(as.list(theta),
  {
    ri = exp(runif(N, log(r[1]), log(r[2]))) ## get N values between r[1] and r[2], assign to ri
    ki = exp(runif(N, log(k[1]), log(k[2]))) ## get N values between k[1] and k[2], assign to ki
    itheta=cbind(r=ri, k=ki, lam1=lambda[1], lam2=lambda[2], sigR=sigR)
    ## assign ri, ki, and final biomass range to itheta
    M = apply(itheta,1,.schaefer) ## call Schaefer function with parameters in itheta
    i=1:N
    ## prototype objective function
    get.ell=function(i) M[[i]]$ell
    ell = sapply(i, get.ell)
    get.J=function(i) M[[i]]$J # Ye
    J=sapply(i,get.J) # Ye
    return(list(r=ri, k=ki, ell=ell, J=J)) # Ye adding J=J
  })
}

getBiomass <- function(r, k, j)
{
  BT <- NULL
  bt=vector()
  for (v in 1:length(r))
  {
    bt[1]=j[v]*k[v]*exp(rnorm(1,0, sigR)) ## set biomass in first year
    for(i in 1:nyr) ## for all years in the time series
    {
      xt=rnorm(1,0, sigR)
      bt[i+1]=(bt[i]+r[v]*bt[i]*(1-bt[i]/k[v])-ct[i])*exp(xt)
      ## calculate biomass as previous year's biomass plus net production minus catch
    }
    BT=rbind(BT, t(t(bt)))
  }
  return(BT)
}

## The End of Functions section
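## Aside (editor's illustration): a minimal, self-contained sketch of how the
## functions above fit together on a toy catch series (all values made up).
## Note that .schaefer/sraMSY/getBiomass read yr, ct, nyr, startbt, interyr,
## interbio and sigR from the calling environment, so those must exist first.
if (FALSE) {
  yr <- 1990:2009; ct <- c(seq(5,50,length=10), seq(48,10,length=10))
  nyr <- length(yr); sigR <- 0; startbt <- seq(0.5, 0.9, by=0.05)
  interyr <- yr[2]; interbio <- c(0,1)
  theta <- list(r=c(0.2,1), k=c(max(ct),50*max(ct)), lambda=c(0.05,0.6), sigR=sigR)
  R <- sraMSY(theta, 5000)
  r.v <- R$r[R$ell==1]; k.v <- R$k[R$ell==1]  # keep viable pairs only
  exp(mean(log(r.v*k.v/4)))                   # geometric-mean MSY estimate
}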
cat("Step 4","\n")
stockLoop <- StockList
# for a test run, select stocks from 5 randomly chosen area codes first
if(TestRUN)
{
  set.seed(999)
  AreaCodeList <- unique(cdat1$AREA_Code)
  sampledAC <- sample(AreaCodeList, size=5, replace=F)
  stockLoop <- cdat1[cdat1$AREA_Code %in% sampledAC, c("Stock_ID")]
}

# set up counters
counter1 <- 0
counter2 <- 0

cat("Step 5","\n")
## Loop through stocks
for(stock in stockLoop)
{
  t0 <- Sys.time()
  ##stock = "3845" # NB only for testing a single loop!
  ## make graph file names:
  b  <- with(cdat1, cdat1[Stock_ID == stock, c(1,3,5,12)]) # Stock_ID, AREA_Names, Country, Species
  bb <- do.call(paste, b)

  yr  <- cdat$yr[as.character(cdat$stock)==stock]
  ct  <- as.numeric(cdat$ct[as.character(cdat$stock)==stock])/1000 ## assumes catch is given in tonnes, transforms to '000 tonnes
  res <- unique(as.character(cdat$res[as.character(cdat$stock)==stock])) ## resilience from FishBase, if needed, enable in PARAMETER SECTION
  nyr <- length(yr) ## number of years in the time series

  cat("\n","Stock",stock,"\n")
  flush.console()

  ## PARAMETER SECTION
  mvlen=3
  ma=function(x,n=mvlen){filter(x,rep(1/n,n),sides=1)} # one-sided (trailing) moving average
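  # Aside (editor's illustration): stats::filter with sides=1 gives a trailing
  # moving average; the first n-1 values are NA because the window is incomplete.
  if (FALSE) {
    ma(c(1, 2, 3, 4, 5))  # NA NA 2 3 4
  }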
  ## If resilience is to be used, delete ## in rows 1-4 below and set ## in row 5 below
  start_r <- if(res == "Very low"){c(0.015, 0.1)} else {
               if(res == "Low") {c(0.05,0.5)} else {
                 if(res == "High") {c(0.6,1.5)} else {c(0.2,1)}
               }
             }
  ## Medium, or default if no res is found
  ##start_r <- c(0.5,1.5) ## disable this line if you use resilience
  start_k <- c(max(ct), 50*max(ct)) ## default upper bound for k, i.e. 50 * max catch
  ## startbio <- c(0.8,1) ## assumed biomass range at start of time series, as fraction of k
  ##startbio <- if(ct[1]/max(ct) < 0.5) {c(0.5,0.9)} else {c(0.3,0.6)} ## use for batch processing

  ## NB: Yimin's new idea of 20Jan14
  startbio <- if(mean(ct[1:5])/max(ct) < 0.3) {c(0.6,0.95)} else {
                if(mean(ct[1:5])/max(ct)>0.3 & mean(ct[1:5])/max(ct)<0.6) {c(0.3,0.7)} else {
                  c(0.2,0.6)}}

  interyr  <- yr[2]   ## interim year within time series for which a biomass estimate is available; set to yr[2] if no estimates are available
  interbio <- c(0, 1) ## biomass range for interim year, as fraction of k; set to 0 and 1 if not available
  ## finalbio <- c(0.8, 0.9) ## biomass range after last catches, as fraction of k
  ## finalbio <- if(ct[nyr]/max(ct) > 0.5) {c(0.3,0.7)} else {c(0.01,0.4)} ## use for batch processing

  ## Yimin's new stuff of 10Mar14
  #######> pre-classification

  pre.clas=ct
  pre.clas[pre.clas==0]=0.1
  tx=ma(as.numeric(pre.clas),n=mvlen)
  Myr=which.max(tx)
  Maxc=pre.clas[which.max(tx)]

  if(Myr==1) startbio=c(0.05,0.6) else
  {
    if (ct[1]/Maxc>=0.5) startbio=c(0.4,0.85)
    else startbio=c(0.65,0.95)
  }

  if (Myr==length(yr)) finalbio=c(.4,.95) else # i.e. from fully exploited to overexploited
  {
    if (tx[length(ct)]/Maxc>=0.5) finalbio=c(.4,.85)
    else finalbio=c(.05,.6)
  }
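  # Aside (editor's illustration): the pre-classification above keys the biomass
  # priors to where the smoothed catch peaks. For a toy series that peaks in the
  # final year, the stock is assumed still near full exploitation:
  if (FALSE) {
    ct.toy <- c(10, 20, 30, 40, 50)
    tx.toy <- ma(ct.toy)  # trailing 3-yr moving average: NA NA 20 30 40
    which.max(tx.toy)     # Myr = 5 = length(yr), so finalbio = c(0.4, 0.95)
  }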
  # if (Myr==length(yr))finalbio=c(.5,.9)
  # #if (Myr<length(yr)){
  # #  if ((tx[length(ct)]/Maxc)>=0.8) finalbio=c(.4,.8) else
  # #  if (tx[length(ct)]/Maxc>0.5) finalbio=c(.3,.7) else finalbio=c(.05,.6)}
  # # below is the last used (20 Feb)
  # if (Myr<length(yr))
  # {
  #   if (tx[length(ct)]/Maxc>0.5) finalbio=c(.2,.8)
  #   else finalbio=c(.05,.6)
  # }

  ##############<
  n <- 30000   ## number of iterations, e.g. 100000
  sigR <- 0.0  ## process error; 0 if deterministic model; 0.05 reasonable value? 0.2 is too high

  startbt <- seq(startbio[1], startbio[2], by = 0.05) ## apply range of start biomass in steps of 0.05
  parbound <- list(r = start_r, k = start_k, lambda = finalbio, sigR)

  cat("Last year =", max(yr), ", last catch =", 1000*ct[nyr], "\n")
  cat("Resilience =", res, "\n")
  cat("Process error =", sigR, "\n")
  cat("Assumed initial biomass (B/k) =", startbio[1], "-", startbio[2], " k", "\n")
  cat("Assumed intermediate biomass (B/k) in", interyr, " =", interbio[1], "-", interbio[2], " k", "\n")
  cat("Assumed final biomass (B/k) =", parbound$lambda[1], "-", parbound$lambda[2], " k", "\n")
  cat("Initial bounds for r =", parbound$r[1], "-", parbound$r[2], "\n")
  cat("Initial bounds for k =", format(1000*parbound$k[1], digits=3), "-", format(1000*parbound$k[2], digits=3), "\n")

  flush.console()

  ## MAIN
  R1 = sraMSY(parbound, n)

  ## Get statistics on r, k, MSY and determine new bounds for r and k
  r1 <- R1$r[R1$ell==1]
  k1 <- R1$k[R1$ell==1]
  j1 <- R1$J[R1$ell==1] # Ye
  msy1 <- r1*k1/4
  mean_msy1 <- exp(mean(log(msy1)))
  max_k1a <- min(k1[r1<1.1*parbound$r[1]]) ## smallest k1 near initial lower bound of r
  max_k1b <- max(k1[r1*k1/4<mean_msy1])    ## largest k1 that gives mean MSY
  max_k1  <- if(max_k1a < max_k1b) {max_k1a} else {max_k1b}

  if(length(r1)<10)
  {
    cat("Too few (", length(r1), ") possible r-k combinations, check input parameters","\n")
    appendPar  <- ifelse(counter1==0,F,T)
    colnamePar <- ifelse(counter1==0,T,F)

    NoModellingSpe <- as.data.frame(cbind(stock,length(r1),b))
    names(NoModellingSpe) <- c("Stock","No_of_r1",names(b))
    write.table(NoModellingSpe, file=outfile2,
                append = appendPar, row.names = FALSE,
                col.names = colnamePar, sep=",")
    flush.console()
    counter1 <- counter1 + 1
  }

  if(length(r1)>=10)
  {
    ## set new upper bound of r to 1.2 max r1
    parbound$r[2] <- 1.2*max(r1)
    ## set new lower bound for k to 0.9 min k1 and upper bound to max_k1
    parbound$k <- c(0.9 * min(k1), max_k1)

    cat("First MSY =", format(1000*mean_msy1, digits=3), "\n")
    cat("First r =", format(exp(mean(log(r1))), digits=3), "\n")
    cat("New upper bound for r =", format(parbound$r[2], digits=2), "\n")
    cat("New range for k =", format(1000*parbound$k[1], digits=3), "-", format(1000*parbound$k[2], digits=3), "\n")

    ## Repeat analysis with new r-k bounds
    R1 = sraMSY(parbound, n)

    ## Get statistics on r, k and msy
    r = R1$r[R1$ell==1]
    k = R1$k[R1$ell==1]
    j = R1$J[R1$ell==1] # Ye
    msy = r * k / 4
    mean_ln_msy = mean(log(msy))

    ##############################################################
    ##> Ye
    # BT=0

    R2 <- getBiomass(r, k, j)

    #R2<-R2[-1,]
    runs  <- rep(1:length(r), each=nyr+1)
    years <- rep(yr[1]:(yr[length(yr)]+1), length=length(r)*(length(yr)+1))

    runs  = t(runs)
    years = t(years)
    stock_id = rep(stock, length(runs))
    R3 <- cbind(as.numeric(runs), as.numeric(years), stock_id, as.numeric(R2))

    ## changed this, as otherwise biomass is the level of the factor below
    R4 <- data.frame(R3, stringsAsFactors=FALSE)
    names(R4) <- c("Run", "Year", "Stock", "Biomass")

    Bmsy_x <- k*0.5
    Run <- c(1:length(r))
    BMSY <- cbind(Run, Bmsy_x)
    R5 <- merge(R4, BMSY, by="Run", all.x=T, all.y=F)
    R5$B_Bmsy <- as.numeric(paste(R5$Biomass))/R5$Bmsy_x

    ### B/Bmsy calculated for all feasible combinations of r, K, B0
    R6 <- aggregate(log(B_Bmsy)~as.numeric(Year)+Stock, data=R5,
                    FUN=function(z){c(mean=mean(z), sd=sd(z), upr=exp(quantile(z, p=0.975)),
                                      lwr=exp(quantile(z, p=0.025)), lwrQ=exp(quantile(z, p=0.25)),
                                      uprQ=exp(quantile(z, p=0.75)))}) # calculated directly from R5 because B_Bmsy has a lognormal distribution

    R6 <- data.frame(cbind(R6[,1:2], R6[,3][,1], R6[,3][,2], R6[,3][,3], R6[,3][,4], R6[,3][,5], R6[,3][,6]))
    names(R6) <- c("Year", "Stock", "BoverBmsy", "BoverBmsySD", "BoverBmsyUpper", "BoverBmsyLower", "BoverBmsylwrQ", "BoverBmsyuprQ")
    ## remove the last row, as its year is 1 greater than the number of years
    ## (final year removed here for ease of data frame output below)
    R6 <- R6[-nrow(R6),]
    ## geometric mean
    GM_B_Bmsy <- exp(R6$BoverBmsy)
    GM_B_BmsySD = R6$BoverBmsySD # add
    ## arithmetic mean
    M_B_Bmsy <- exp(R6$BoverBmsy + R6$BoverBmsySD^2/2)
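    # Aside (editor's note): for a lognormal variable with log-mean mu and
    # log-sd s, the median (geometric mean) is exp(mu) while the arithmetic
    # mean is exp(mu + s^2/2), which is what the two lines above compute.
    # A quick empirical check, never executed here:
    if (FALSE) {
      z <- rlnorm(1e5, meanlog=0.2, sdlog=0.5)
      c(empirical=mean(z), formula=exp(0.2 + 0.5^2/2))  # both ~1.38
    }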
    ### r, k, and MSY

    #del GM_B_Bmsy=c(rep(0,(min(yr)-1940)),GM_B_Bmsy)
    #del GM_B_BmsySD=c(rep(0,(min(yr)-1940)),GM_B_BmsySD) ######
    #del M_B_Bmsy=c(rep(0,(min(yr)-1940)),M_B_Bmsy)
    #del yr1=seq(1940,max(yr))

    yr1=yr #add

    stockInfo <- with(cdat1, cdat1[Stock_ID==stock,1:12])
    temp=c(startbio[1],startbio[2],finalbio[1],finalbio[2],res,
           mean(log(r)),sd(log(r)),mean(log(k)),sd(log(k)),mean(log(msy)),
           sd(log(msy)),sigR,min(yr),max(yr),max(ct),length(r),GM_B_Bmsy,GM_B_BmsySD,M_B_Bmsy)
    #add: "GM_B_BmsySD" added in the line above

    output=as.data.frame(matrix(temp,nrow=1))
    output <- cbind(stockInfo,output)
    names(output) <- c(names(cdat1)[1:12],"startbio[1]","startbio[2]","finalbio[1]","finalbio[2]",
                       "res","mean(log(r))","sd(log(r))","mean(log(k))","sd(log(k))",
                       "mean(log(msy))","sd(log(msy))","sigR","min(yr)","max(yr)","max(ct)",
                       "length(r)",paste("GM_B_msy",yr1,sep="_"),paste("GM_B_msySD",yr1,sep="_"),paste("M_B_Bmsy",yr1,sep="_"))
    #add: 'paste("GM_B_msySD",yr1,sep="_")' added in the line above

    ######< Ye
    ########################################################

    ## plot MSY over catch data
    pdf(paste(bb,"graph.pdf",sep="_"))

    par(mfcol=c(2,3))
    plot(yr, ct, type="l", ylim = c(0, max(ct)), xlab = "Year",
         ylab = "Catch (1000 t)", main = paste("StockID",stock,sep=":"))
    abline(h=exp(mean(log(msy))),col="red", lwd=2)
    abline(h=exp(mean_ln_msy - 2 * sd(log(msy))),col="red")
    abline(h=exp(mean_ln_msy + 2 * sd(log(msy))),col="red")

    hist(r, freq=F, xlim=c(0, 1.2 * max(r)), main = "")
    abline(v=exp(mean(log(r))),col="red",lwd=2)
    abline(v=exp(mean(log(r))-2*sd(log(r))),col="red")
    abline(v=exp(mean(log(r))+2*sd(log(r))),col="red")

    plot(r1, k1, xlim = start_r, ylim = start_k, xlab="r", ylab="k (1000t)")

    hist(k, freq=F, xlim=c(0, 1.2 * max(k)), xlab="k (1000t)", main = "")
    abline(v=exp(mean(log(k))),col="red", lwd=2)
    abline(v=exp(mean(log(k))-2*sd(log(k))),col="red")
    abline(v=exp(mean(log(k))+2*sd(log(k))),col="red")

    plot(log(r), log(k), xlab="ln(r)", ylab="ln(k)")
    abline(v=mean(log(r)))
    abline(h=mean(log(k)))
    abline(mean(log(msy))+log(4),-1, col="red",lwd=2)
    abline(mean(log(msy))-2*sd(log(msy))+log(4),-1, col="red")
    abline(mean(log(msy))+2*sd(log(msy))+log(4),-1, col="red")

    hist(msy, freq=F, xlim=c(0, 1.2 * max(msy)), xlab="MSY (1000t)", main = "")
    abline(v=exp(mean(log(msy))),col="red", lwd=2)
    abline(v=exp(mean_ln_msy - 2 * sd(log(msy))),col="red")
    abline(v=exp(mean_ln_msy + 2 * sd(log(msy))),col="red")

    graphics.off()

    cat("Possible combinations = ", length(r),"\n")
    cat("geom. mean r =", format(exp(mean(log(r))),digits=3), "\n")
    cat("r +/- 2 SD =", format(exp(mean(log(r))-2*sd(log(r))),digits=3),"-",format(exp(mean(log(r))+2*sd(log(r))),digits=3), "\n")
    cat("geom. mean k =", format(1000*exp(mean(log(k))),digits=3), "\n")
    cat("k +/- 2 SD =", format(1000*exp(mean(log(k))-2*sd(log(k))),digits=3),"-",format(1000*exp(mean(log(k))+2*sd(log(k))),digits=3), "\n")
    cat("geom. mean MSY =", format(1000*exp(mean(log(msy))),digits=3),"\n")
    cat("MSY +/- 2 SD =", format(1000*exp(mean_ln_msy - 2 * sd(log(msy))),digits=3), "-", format(1000*exp(mean_ln_msy + 2 * sd(log(msy))),digits=3), "\n")

    ## Write results into outfile, in append mode (no header in file, existing files will be continued)
    ## output = data.frame(stock, sigR, startbio[1], startbio[2], interbio[1], interbio[2], finalbio[1], finalbio[2], min(yr), max(yr), res, max(ct), ct[1], ct[nyr], length(r), exp(mean(log(r))), sd(log(r)), min(r), quantile(r,0.05), quantile(r,0.25), median(r), quantile(r,0.75), quantile(r,0.95), max(r), exp(mean(log(k))), sd(log(k)), min(k), quantile(k, 0.05), quantile(k, 0.25), median(k), quantile(k, 0.75), quantile(k, 0.95), max(k), exp(mean(log(msy))), sd(log(msy)), min(msy), quantile(msy, 0.05), quantile(msy, 0.25), median(msy), quantile(msy, 0.75), quantile(msy, 0.95), max(msy))

    #write.table(output, file = outfile, append = TRUE, sep = ";", dec = ".", row.names = FALSE, col.names = FALSE)
    appendPar  <- ifelse(counter2==0,F,T)
    colnamePar <- ifelse(counter2==0,T,F)
    write.table(output, file = outfile, append = appendPar, sep = ",", dec = ".",
                row.names = FALSE, col.names = colnamePar)

    counter2 <- counter2 + 1

  }
  cat("Elapsed: ", Sys.time()-t0, " \n")
} ## End of stock loop, get next stock or exit
@ -0,0 +1,440 @@
set.seed(999) ## for a reproducible random sequence
#require(hacks)

#setwd("C:/Users/Ye/Documents/Data poor fisheries/Martell Froese Method/")

## Read data for stock, year=yr, catch=ct, and resilience=res. Expects a space-delimited
## file with header yr ct, years as integers and catch as reals with a decimal point.
## For example
## stock res yr ct
## cap-icel Medium 1984 1234.32

## filename <- "RAM_MSY.csv"
##filename <- "ICESct2.csv"

cat("Step 1","\n")
TestRUN <- F # if TRUE, run only on the test samples; FALSE does a formal (full) run

filename <- "D20.csv"
outfile  <- "CatchMSY_Output.csv"
outfile2 <- "NonProcessedSpecies.csv"

#cdat <- read.csv2(filename, header=T, dec=".")
cdat1 <- read.csv(filename)
cat("\n", "File", filename, "read successfully","\n")

cat("Step 2","\n")
if(file.exists("cdat.RData"))
{load("cdat.RData")} else
{
  dim(cdat1)
  yrs=1950:2012

  # set NA as 0
  cdat1[is.na(cdat1)] <- 0
  nrow     <- length(cdat1[,1])
  ndatColn <- length(cdat1[1,c(-1:-12)])
  rownames(cdat1) <- NULL

  cdat <- NULL
  for(i in 1:nrow)
  { #i=1
    #a <- ctotal3[i,-1]
    tmp=data.frame(stock=rep(as.character(cdat1[i,"Stock_ID"]),ndatColn),
                   species=rep(as.character(cdat1[i,"Scientific_name"]),ndatColn),
                   yr=yrs, ct=unlist(c(cdat1[i,c(-1:-12)])),
                   res=rep(cdat1[i,"ResilienceIndex"],ndatColn))
    cdat <- rbind(cdat,tmp)
    #edit(cdat)
  }
}
StockList=unique(as.character(cdat$stock))

colnames(cdat)

#stock_id <- unique(as.character(cdat$stock))
#??
# stock_id <- "cod-2224" ## for selecting individual stocks
# stock=stock_id
#??

cat("Step 3","\n")

## FUNCTIONS used subsequently
.schaefer <- function(theta)
{
  with(as.list(theta), { ## for all combinations of ri & ki
    bt=vector()
    ell = 0 ## initialize ell
    J=0 #Ye
    for (j in startbt)
    {
      if(ell == 0)
      {
        bt[1]=j*k*exp(rnorm(1,0, sigR)) ## set biomass in first year
        for(i in 1:nyr) ## for all years in the time series
        {
          xt=rnorm(1,0, sigR)
          bt[i+1]=(bt[i]+r*bt[i]*(1-bt[i]/k)-ct[i])*exp(xt)
          ## calculate biomass as previous year's biomass plus net production minus catch
        }

        # Bernoulli likelihood: assign 0 or 1 to each combination of r and k
        ell = 0
        if(bt[nyr+1]/k>=lam1 && bt[nyr+1]/k <=lam2 && min(bt) > 0 && max(bt) <=k && bt[which(yr==interyr)]/k>=interbio[1] && bt[which(yr==interyr)]/k<=interbio[2])
          ell = 1
        J=j # Ye
      }
    }
    return(list(ell=ell,J=J)) # Ye adding J=J
  })
}

sraMSY <- function(theta, N)
{
  # This function conducts the stock reduction
  # analysis for N trials
  # args:
  #   theta - a list object containing:
  #     r (lower and upper bounds for r)
  #     k (lower and upper bounds for k)
  #     lambda (limits for current depletion)

  with(as.list(theta),
  {
    ri = exp(runif(N, log(r[1]), log(r[2]))) ## get N values between r[1] and r[2], assign to ri
    ki = exp(runif(N, log(k[1]), log(k[2]))) ## get N values between k[1] and k[2], assign to ki
    itheta=cbind(r=ri, k=ki, lam1=lambda[1], lam2=lambda[2], sigR=sigR)
    ## assign ri, ki, and final biomass range to itheta
    M = apply(itheta,1,.schaefer) ## call Schaefer function with parameters in itheta
    i=1:N
    ## prototype objective function
    get.ell=function(i) M[[i]]$ell
    ell = sapply(i, get.ell)
    get.J=function(i) M[[i]]$J # Ye
    J=sapply(i,get.J) # Ye
    return(list(r=ri, k=ki, ell=ell, J=J)) # Ye adding J=J
  })
}

getBiomass <- function(r, k, j)
{
  BT <- NULL
  bt=vector()
  for (v in 1:length(r))
  {
    bt[1]=j[v]*k[v]*exp(rnorm(1,0, sigR)) ## set biomass in first year
    for(i in 1:nyr) ## for all years in the time series
    {
      xt=rnorm(1,0, sigR)
      bt[i+1]=(bt[i]+r[v]*bt[i]*(1-bt[i]/k[v])-ct[i])*exp(xt)
      ## calculate biomass as previous year's biomass plus net production minus catch
    }
    BT=rbind(BT, t(t(bt)))
  }
  return(BT)
}

## The End of Functions section
cat("Step 4","\n")
stockLoop <- StockList
# for a test run, select stocks from 5 randomly chosen area codes first
if(TestRUN)
{
  set.seed(999)
  AreaCodeList <- unique(cdat1$AREA_Code)
  sampledAC <- sample(AreaCodeList, size=5, replace=F)
  stockLoop <- cdat1[cdat1$AREA_Code %in% sampledAC, c("Stock_ID")]
}

# set up counters
counter1 <- 0
counter2 <- 0

cat("Step 5","\n")
## Loop through stocks
for(stock in stockLoop)
{
  t0 <- Sys.time()
  ##stock = "3845" # NB only for testing a single loop!
  ## make graph file names:
  b  <- with(cdat1, cdat1[Stock_ID == stock, c(1,3,5,12)]) # Stock_ID, AREA_Names, Country, Species
  bb <- do.call(paste, b)

  yr  <- cdat$yr[as.character(cdat$stock)==stock]
  ct  <- as.numeric(cdat$ct[as.character(cdat$stock)==stock])/1000 ## assumes catch is given in tonnes, transforms to '000 tonnes
  res <- unique(as.character(cdat$res[as.character(cdat$stock)==stock])) ## resilience from FishBase, if needed, enable in PARAMETER SECTION
  nyr <- length(yr) ## number of years in the time series

  cat("\n","Stock",stock,"\n")
  flush.console()

  ## PARAMETER SECTION
  mvlen=3
  ma=function(x,n=mvlen){filter(x,rep(1/n,n),sides=1)}

  ## If resilience is to be used, delete ## in rows 1-4 below and set ## in row 5 below
  start_r <- if(res == "Very low"){c(0.015, 0.1)} else {
               if(res == "Low") {c(0.05,0.5)} else {
                 if(res == "High") {c(0.6,1.5)} else {c(0.2,1)}
               }
             }
  ## Medium, or default if no res is found
  ##start_r <- c(0.5,1.5) ## disable this line if you use resilience
  start_k <- c(max(ct), 50*max(ct)) ## default upper bound for k, i.e. 50 * max catch
  ## startbio <- c(0.8,1) ## assumed biomass range at start of time series, as fraction of k
  ##startbio <- if(ct[1]/max(ct) < 0.5) {c(0.5,0.9)} else {c(0.3,0.6)} ## use for batch processing

  ## NB: Yimin's new idea of 20Jan14
  startbio <- if(mean(ct[1:5])/max(ct) < 0.3) {c(0.6,0.95)} else {
                if(mean(ct[1:5])/max(ct)>0.3 & mean(ct[1:5])/max(ct)<0.6) {c(0.3,0.7)} else {
                  c(0.2,0.6)}}

  interyr  <- yr[2]   ## interim year within time series for which a biomass estimate is available; set to yr[2] if no estimates are available
  interbio <- c(0, 1) ## biomass range for interim year, as fraction of k; set to 0 and 1 if not available
  ## finalbio <- c(0.8, 0.9) ## biomass range after last catches, as fraction of k
  ## finalbio <- if(ct[nyr]/max(ct) > 0.5) {c(0.3,0.7)} else {c(0.01,0.4)} ## use for batch processing

  ## Yimin's new stuff of 10Mar14
  #######> pre-classification

  pre.clas=ct
  pre.clas[pre.clas==0]=0.1
  tx=ma(as.numeric(pre.clas),n=mvlen)
  Myr=which.max(tx)
  Maxc=pre.clas[which.max(tx)]

  if(Myr==1) startbio=c(0.05,0.6) else
  {
    if (ct[1]/Maxc>=0.5) startbio=c(0.4,0.85)
    else startbio=c(0.65,0.95)
  }

  if (Myr==length(yr)) finalbio=c(.4,.95) else # i.e. from fully exploited to overexploited
  {
    if (tx[length(ct)]/Maxc>=0.5) finalbio=c(.4,.85)
    else finalbio=c(.05,.6)
  }
  # if (Myr==length(yr))finalbio=c(.5,.9)
  # #if (Myr<length(yr)){
  # #  if ((tx[length(ct)]/Maxc)>=0.8) finalbio=c(.4,.8) else
  # #  if (tx[length(ct)]/Maxc>0.5) finalbio=c(.3,.7) else finalbio=c(.05,.6)}
  # # below is the last used (20 Feb)
  # if (Myr<length(yr))
  # {
  #   if (tx[length(ct)]/Maxc>0.5) finalbio=c(.2,.8)
  #   else finalbio=c(.05,.6)
  # }

  ##############<
  n <- 30000   ## number of iterations, e.g. 100000
  sigR <- 0.0  ## process error; 0 if deterministic model; 0.05 reasonable value? 0.2 is too high

  startbt <- seq(startbio[1], startbio[2], by = 0.05) ## apply range of start biomass in steps of 0.05
  parbound <- list(r = start_r, k = start_k, lambda = finalbio, sigR)

  cat("Last year =", max(yr), ", last catch =", 1000*ct[nyr], "\n")
  cat("Resilience =", res, "\n")
  cat("Process error =", sigR, "\n")
  cat("Assumed initial biomass (B/k) =", startbio[1], "-", startbio[2], " k", "\n")
  cat("Assumed intermediate biomass (B/k) in", interyr, " =", interbio[1], "-", interbio[2], " k", "\n")
  cat("Assumed final biomass (B/k) =", parbound$lambda[1], "-", parbound$lambda[2], " k", "\n")
  cat("Initial bounds for r =", parbound$r[1], "-", parbound$r[2], "\n")
  cat("Initial bounds for k =", format(1000*parbound$k[1], digits=3), "-", format(1000*parbound$k[2], digits=3), "\n")

  flush.console()

  ## MAIN
  R1 = sraMSY(parbound, n)

  ## Get statistics on r, k, MSY and determine new bounds for r and k
  r1 <- R1$r[R1$ell==1]
  k1 <- R1$k[R1$ell==1]
  j1 <- R1$J[R1$ell==1] # Ye
  msy1 <- r1*k1/4
  mean_msy1 <- exp(mean(log(msy1)))
  max_k1a <- min(k1[r1<1.1*parbound$r[1]]) ## smallest k1 near initial lower bound of r
  max_k1b <- max(k1[r1*k1/4<mean_msy1])    ## largest k1 that gives mean MSY
  max_k1  <- if(max_k1a < max_k1b) {max_k1a} else {max_k1b}

  if(length(r1)<10)
  {
    cat("Too few (", length(r1), ") possible r-k combinations, check input parameters","\n")
    appendPar  <- ifelse(counter1==0,F,T)
    colnamePar <- ifelse(counter1==0,T,F)

    NoModellingSpe <- as.data.frame(cbind(stock,length(r1),b))
    names(NoModellingSpe) <- c("Stock","No_of_r1",names(b))
    write.table(NoModellingSpe, file=outfile2,
                append = appendPar, row.names = FALSE,
                col.names = colnamePar, sep=",")
    flush.console()
    counter1 <- counter1 + 1
  }

  if(length(r1)>=10)
  {
    ## set new upper bound of r to 1.2 max r1
    parbound$r[2] <- 1.2*max(r1)
    ## set new lower bound for k to 0.9 min k1 and upper bound to max_k1
    parbound$k <- c(0.9 * min(k1), max_k1)

    cat("First MSY =", format(1000*mean_msy1, digits=3), "\n")
    cat("First r =", format(exp(mean(log(r1))), digits=3), "\n")
    cat("New upper bound for r =", format(parbound$r[2], digits=2), "\n")
    cat("New range for k =", format(1000*parbound$k[1], digits=3), "-", format(1000*parbound$k[2], digits=3), "\n")

    ## Repeat analysis with new r-k bounds
    R1 = sraMSY(parbound, n)

    ## Get statistics on r, k and msy
    r = R1$r[R1$ell==1]
    k = R1$k[R1$ell==1]
    j = R1$J[R1$ell==1] # Ye
    msy = r * k / 4
    mean_ln_msy = mean(log(msy))
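    # Aside (editor's note): mean_ln_msy is kept on the log scale; exp() of it
    # is the geometric mean of the MSY sample, which for skewed, lognormal-like
    # samples sits below the arithmetic mean. A quick check, never executed here:
    if (FALSE) {
      msy.toy <- c(10, 10, 10, 100)
      c(geometric=exp(mean(log(msy.toy))), arithmetic=mean(msy.toy))  # ~17.8 vs 32.5
    }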
    ##############################################################
    ##> Ye
    # BT=0

    R2 <- getBiomass(r, k, j)

    #R2<-R2[-1,]
    runs  <- rep(1:length(r), each=nyr+1)
    years <- rep(yr[1]:(yr[length(yr)]+1), length=length(r)*(length(yr)+1))

    runs  = t(runs)
    years = t(years)
    stock_id = rep(stock, length(runs))
    R3 <- cbind(as.numeric(runs), as.numeric(years), stock_id, as.numeric(R2))

    ## changed this, as otherwise biomass is the level of the factor below
    R4 <- data.frame(R3, stringsAsFactors=FALSE)
    names(R4) <- c("Run", "Year", "Stock", "Biomass")

    Bmsy_x <- k*0.5
    Run <- c(1:length(r))
    BMSY <- cbind(Run, Bmsy_x)
    R5 <- merge(R4, BMSY, by="Run", all.x=T, all.y=F)
    R5$B_Bmsy <- as.numeric(paste(R5$Biomass))/R5$Bmsy_x

    ### B/Bmsy calculated for all feasible combinations of r, K, B0
    R6 <- aggregate(log(B_Bmsy)~as.numeric(Year)+Stock, data=R5,
                    FUN=function(z){c(mean=mean(z), sd=sd(z), upr=exp(quantile(z, p=0.975)),
                                      lwr=exp(quantile(z, p=0.025)), lwrQ=exp(quantile(z, p=0.25)),
                                      uprQ=exp(quantile(z, p=0.75)))}) # calculated directly from R5 because B_Bmsy has a lognormal distribution

    R6 <- data.frame(cbind(R6[,1:2], R6[,3][,1], R6[,3][,2], R6[,3][,3], R6[,3][,4], R6[,3][,5], R6[,3][,6]))
    names(R6) <- c("Year", "Stock", "BoverBmsy", "BoverBmsySD", "BoverBmsyUpper", "BoverBmsyLower", "BoverBmsylwrQ", "BoverBmsyuprQ")
    ## remove the last row, as its year is 1 greater than the number of years
    ## (final year removed here for ease of data frame output below)
    R6 <- R6[-nrow(R6),]
    ## geometric mean
    GM_B_Bmsy <- exp(R6$BoverBmsy)
    GM_B_BmsySD = R6$BoverBmsySD # add
    ## arithmetic mean
    M_B_Bmsy <- exp(R6$BoverBmsy + R6$BoverBmsySD^2/2)

    ### r, k, and MSY

    #del GM_B_Bmsy=c(rep(0,(min(yr)-1940)),GM_B_Bmsy)
    #del GM_B_BmsySD=c(rep(0,(min(yr)-1940)),GM_B_BmsySD) ######
    #del M_B_Bmsy=c(rep(0,(min(yr)-1940)),M_B_Bmsy)
    #del yr1=seq(1940,max(yr))

    yr1=yr #add

    stockInfo <- with(cdat1, cdat1[Stock_ID==stock,1:12])
    temp=c(startbio[1],startbio[2],finalbio[1],finalbio[2],res,
           mean(log(r)),sd(log(r)),mean(log(k)),sd(log(k)),mean(log(msy)),
           sd(log(msy)),sigR,min(yr),max(yr),max(ct),length(r),GM_B_Bmsy,GM_B_BmsySD,M_B_Bmsy)
    #add: "GM_B_BmsySD" added in the line above

    output=as.data.frame(matrix(temp,nrow=1))
    output <- cbind(stockInfo,output)
    names(output) <- c(names(cdat1)[1:12],"startbio[1]","startbio[2]","finalbio[1]","finalbio[2]",
                       "res","mean(log(r))","sd(log(r))","mean(log(k))","sd(log(k))",
                       "mean(log(msy))","sd(log(msy))","sigR","min(yr)","max(yr)","max(ct)",
                       "length(r)",paste("GM_B_msy",yr1,sep="_"),paste("GM_B_msySD",yr1,sep="_"),paste("M_B_Bmsy",yr1,sep="_"))
    #add: 'paste("GM_B_msySD",yr1,sep="_")' added in the line above

    ######< Ye
    ########################################################

    ## plot MSY over catch data
    pdf(paste(bb,"graph.pdf",sep="_"))

    par(mfcol=c(2,3))
    plot(yr, ct, type="l", ylim = c(0, max(ct)), xlab = "Year",
         ylab = "Catch (1000 t)", main = paste("StockID",stock,sep=":"))
    abline(h=exp(mean(log(msy))),col="red", lwd=2)
    abline(h=exp(mean_ln_msy - 2 * sd(log(msy))),col="red")
    abline(h=exp(mean_ln_msy + 2 * sd(log(msy))),col="red")

    hist(r, freq=F, xlim=c(0, 1.2 * max(r)), main = "")
    abline(v=exp(mean(log(r))),col="red",lwd=2)
    abline(v=exp(mean(log(r))-2*sd(log(r))),col="red")
    abline(v=exp(mean(log(r))+2*sd(log(r))),col="red")

    plot(r1, k1, xlim = start_r, ylim = start_k, xlab="r", ylab="k (1000t)")

    hist(k, freq=F, xlim=c(0, 1.2 * max(k)), xlab="k (1000t)", main = "")
    abline(v=exp(mean(log(k))),col="red", lwd=2)
    abline(v=exp(mean(log(k))-2*sd(log(k))),col="red")
    abline(v=exp(mean(log(k))+2*sd(log(k))),col="red")

    plot(log(r), log(k), xlab="ln(r)", ylab="ln(k)")
    abline(v=mean(log(r)))
    abline(h=mean(log(k)))
    abline(mean(log(msy))+log(4),-1, col="red",lwd=2)
    abline(mean(log(msy))-2*sd(log(msy))+log(4),-1, col="red")
    abline(mean(log(msy))+2*sd(log(msy))+log(4),-1, col="red")

    hist(msy, freq=F, xlim=c(0, 1.2 * max(msy)), xlab="MSY (1000t)", main = "")
    abline(v=exp(mean(log(msy))),col="red", lwd=2)
    abline(v=exp(mean_ln_msy - 2 * sd(log(msy))),col="red")
    abline(v=exp(mean_ln_msy + 2 * sd(log(msy))),col="red")

    graphics.off()

    cat("Possible combinations = ", length(r),"\n")
    cat("geom. mean r =", format(exp(mean(log(r))),digits=3), "\n")
    cat("r +/- 2 SD =", format(exp(mean(log(r))-2*sd(log(r))),digits=3),"-",format(exp(mean(log(r))+2*sd(log(r))),digits=3), "\n")
    cat("geom. mean k =", format(1000*exp(mean(log(k))),digits=3), "\n")
    cat("k +/- 2 SD =", format(1000*exp(mean(log(k))-2*sd(log(k))),digits=3),"-",format(1000*exp(mean(log(k))+2*sd(log(k))),digits=3), "\n")
    cat("geom. mean MSY =", format(1000*exp(mean(log(msy))),digits=3),"\n")
    cat("MSY +/- 2 SD =", format(1000*exp(mean_ln_msy - 2 * sd(log(msy))),digits=3), "-", format(1000*exp(mean_ln_msy + 2 * sd(log(msy))),digits=3), "\n")

    ## Write results into outfile, in append mode (no header in file, existing files will be continued)
    ## output = data.frame(stock, sigR, startbio[1], startbio[2], interbio[1], interbio[2], finalbio[1], finalbio[2], min(yr), max(yr), res, max(ct), ct[1], ct[nyr], length(r), exp(mean(log(r))), sd(log(r)), min(r), quantile(r,0.05), quantile(r,0.25), median(r), quantile(r,0.75), quantile(r,0.95), max(r), exp(mean(log(k))), sd(log(k)), min(k), quantile(k, 0.05), quantile(k, 0.25), median(k), quantile(k, 0.75), quantile(k, 0.95), max(k), exp(mean(log(msy))), sd(log(msy)), min(msy), quantile(msy, 0.05), quantile(msy, 0.25), median(msy), quantile(msy, 0.75), quantile(msy, 0.95), max(msy))

    #write.table(output, file = outfile, append = TRUE, sep = ";", dec = ".", row.names = FALSE, col.names = FALSE)
    appendPar  <- ifelse(counter2==0,F,T)
    colnamePar <- ifelse(counter2==0,T,F)
    write.table(output, file = outfile, append = appendPar, sep = ",", dec = ".",
                row.names = FALSE, col.names = colnamePar)

    counter2 <- counter2 + 1

  }
  cat("Elapsed: ", Sys.time()-t0, " \n")
} ## End of stock loop, get next stock or exit