SlideShare, a Scribd company — logo
1 of 12
# Repository configuration

repository.name=Main Repository

# Directory configuration

# Root directory for all repository data (content, indexes, backups)
dir.root=./alf_data

dir.contentstore=${dir.root}/contentstore
dir.contentstore.deleted=${dir.root}/contentstore.deleted

dir.auditcontentstore=${dir.root}/audit.contentstore

# The location for lucene index files
dir.indexes=${dir.root}/lucene-indexes

# The location for index backups
dir.indexes.backup=${dir.root}/backup-lucene-indexes

# The location for lucene index locks
dir.indexes.lock=${dir.indexes}/locks

# Is the JBPM Deploy Process Servlet enabled?
# Default is false. Should not be enabled in production environments as the
# servlet allows unauthenticated deployment of new workflows.
system.workflow.deployservlet.enabled=false

# Sets the location for the JBPM Configuration File
system.workflow.jbpm.config.location=classpath:org/alfresco/repo/workflow/jbpm/jbpm.cfg.xml

# ######################################### #
# Index Recovery and Tracking Configuration #
# ######################################### #
#
# Recovery types are:
#    NONE:     Ignore
#    VALIDATE: Checks that the first and last transaction for each store is represented in the indexes
#    AUTO:     Validates and auto-recovers if validation fails
#    FULL:     Full index rebuild, processing all transactions in order. The server is temporarily suspended.
index.recovery.mode=VALIDATE
# FULL recovery continues when encountering errors
index.recovery.stopOnError=false
index.recovery.maximumPoolSize=5
# Set the frequency with which the index tracking is triggered.
# For more information on index tracking in a cluster:
#    http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later
# By default, this is effectively never, but can be modified as required.
#    Examples:
#       Never:                   * * * * * ? 2099
#       Once every five seconds: 0/5 * * * * ?
#       Once every two seconds : 0/2 * * * * ?
#       See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
index.tracking.cronExpression=0/5 * * * * ?
index.tracking.adm.cronExpression=${index.tracking.cronExpression}
index.tracking.avm.cronExpression=${index.tracking.cronExpression}
# Other properties.
index.tracking.maxTxnDurationMinutes=10
index.tracking.reindexLagMs=1000
index.tracking.maxRecordSetSize=1000
index.tracking.maxTransactionsPerLuceneCommit=100
index.tracking.disableInTransactionIndexing=false
# Index tracking information of a certain age is cleaned out by a scheduled job.
# Any clustered system that has been offline for longer than this period will need to be seeded
# with a more recent backup of the Lucene indexes or the indexes will have to be fully rebuilt.
# Use -1 to disable purging. This can be switched on at any stage.
index.tracking.minRecordPurgeAgeDays=30

# Reindexing of missing content is by default 'never' carried out.
# The cron expression below can be changed to control the timing of this reindexing.
# Users of Enterprise Alfresco can configure this cron expression via JMX without a server restart.
# Note that if alfresco.cluster.name is not set, then reindexing will not occur.
index.reindexMissingContent.cronExpression=* * * * * ? 2099

# Change the failure behaviour of the configuration checker
system.bootstrap.config_check.strict=true

# The name of the cluster
#   Leave this empty to disable cluster entry
alfresco.cluster.name=

# The EHCache RMI peer URL addresses to set in the ehcache-custom.xml file
#   Use this property to set the hostname of the current server.
#   This is only necessary if the cache peer URLs are generated with an invalid IP address for the local server.
alfresco.ehcache.rmi.hostname=
#   Use this property to set the cache peer URL port.
alfresco.ehcache.rmi.remoteObjectPort=0
alfresco.ehcache.rmi.port=0
alfresco.ehcache.rmi.socketTimeoutMillis=5000

# The protocol stack to use from the JGroups configuration file
#   Use this property to select which communication method should be used.
#   The JGroups configuration file is built up using the protocol string
alfresco.jgroups.defaultProtocol=UDP
# The bind address and interface for JGroups to use; equivalent to -Djgroups.bind_addr and -Djgroups.bind_interface
alfresco.jgroups.bind_address=
alfresco.jgroups.bind_interface=
# JGroups configuration (http://www.jgroups.org)
# The location of the JGroups configuration file
alfresco.jgroups.configLocation=classpath:alfresco/jgroups/alfresco-jgroups-${alfresco.jgroups.defaultProtocol}.xml

#
# How long should shutdown wait to complete normally before
# taking stronger action and calling System.exit()
# in ms, 10,000 is 10 seconds
#
shutdown.backstop.timeout=10000
shutdown.backstop.enabled=false
# Server Single User Mode
# note:
#   only allow named user (note: if blank or not set then will allow all users)
#   assuming maxusers is not set to 0
#server.singleuseronly.name=admin

# Server Max Users - limit number of users with non-expired tickets
# note:
#   -1 allows any number of users, assuming not in single-user mode
#   0 prevents further logins, including the ability to enter single-user mode
server.maxusers=-1

# The Cron expression controlling the frequency with which the OpenOffice connection is tested
openOffice.test.cronExpression=0 * * * * ?

#
# Disable all shared caches (mutable and immutable)
#    These properties are used for diagnostic purposes only;
#    leave both set to false in normal operation.
system.cache.disableMutableSharedCaches=false
system.cache.disableImmutableSharedCaches=false

#
# Properties to limit resources spent on individual searches
#
# The maximum time spent pruning results (milliseconds)
system.acl.maxPermissionCheckTimeMillis=10000
# The maximum number of results to perform permission checks against
system.acl.maxPermissionChecks=1000

# Properties to control read permission evaluation for acegi
system.readpermissions.optimise=true
# Batch size used when bulk-fetching for read permission evaluation
system.readpermissions.bulkfetchsize=1000

#
# Manually control how the system handles maximum string lengths.
# Any zero or negative value is ignored.
# Only change this after consulting support or reading the appropriate Javadocs for
# org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2
system.maximumStringLength=-1

#
# Limit hibernate session size by trying to amalgamate events for the L2 session invalidation
# - hibernate works as is up to this size
# - after the limit is hit events that can be grouped invalidate the L2 cache by type and not instance
# events may not group if there are post action listener registered (this is not the case with the default distribution)
system.hibernateMaxExecutions=20000

#
# Determine if modification timestamp propagation from child to parent nodes is respected or not.
# Even if 'true', the functionality is only supported for child associations that declare the
# 'propagateTimestamps' element in the dictionary definition.
system.enableTimestampPropagation=true
#
# Decide if content should be removed from the system immediately after being orphaned.
# Do not change this unless you have examined the impact it has on your backup procedures.
system.content.eagerOrphanCleanup=false
# The number of days to keep orphaned content in the content stores.
#    This has no effect on the 'deleted' content stores, which are not automatically emptied.
system.content.orphanProtectDays=14
# The action to take when a store or stores fails to delete orphaned content
#    IGNORE: Just log a warning. The binary remains and the record is expunged
#    KEEP_URL: Log a warning and create a URL entry with orphan time 0. It won't be processed or removed.
system.content.deletionFailureAction=IGNORE
# The CRON expression to trigger the deletion of resources associated with orphaned content.
system.content.orphanCleanup.cronExpression=0 0 4 * * ?
# The CRON expression to trigger content URL conversion. This process is not intensive and can
#    be triggered on a live system. Similarly, it can be triggered using JMX on a dedicated machine.
system.content.contentUrlConverter.cronExpression=* * * * * ? 2099
system.content.contentUrlConverter.threadCount=2
system.content.contentUrlConverter.batchSize=500
system.content.contentUrlConverter.runAsScheduledJob=false

# #################### #
# Lucene configuration #
# #################### #
#
# Millisecond threshold for text transformations
# Slower transformers will force the text extraction to be asynchronous
#
lucene.maxAtomicTransformationTime=20
#
# The maximum number of clauses that are allowed in a lucene query
#
lucene.query.maxClauses=10000
#
# The size of the queue of nodes waiting for index
# Events are generated as nodes are changed, this is the maximum size of the queue used to coalesce events
# When this size is reached the lists of nodes will be indexed
#
# http://issues.alfresco.com/browse/AR-1280: Setting this high is the workaround as of 1.4.3.
#
lucene.indexer.batchSize=1000000
fts.indexer.batchSize=1000
#
# Index cache sizes
#
lucene.indexer.cacheEnabled=true
lucene.indexer.maxDocIdCacheSize=100000
lucene.indexer.maxDocumentCacheSize=100
lucene.indexer.maxIsCategoryCacheSize=-1
lucene.indexer.maxLinkAspectCacheSize=10000
lucene.indexer.maxParentCacheSize=100000
lucene.indexer.maxPathCacheSize=100000
lucene.indexer.maxTypeCacheSize=10000
#
# Properties for merge (note: this does not affect the final index segment which will be optimised)
# Max merge docs only applies to the merge process not the resulting index which will be optimised.
#
lucene.indexer.mergerMaxMergeDocs=1000000
lucene.indexer.mergerMergeFactor=5
lucene.indexer.mergerMaxBufferedDocs=-1
lucene.indexer.mergerRamBufferSizeMb=16
#
# Properties for delta indexes (note: this does not affect the final index segment which will be optimised)
# Max merge docs only applies to the index building process not the resulting index which will be optimised.
#
lucene.indexer.writerMaxMergeDocs=1000000
lucene.indexer.writerMergeFactor=5
lucene.indexer.writerMaxBufferedDocs=-1
lucene.indexer.writerRamBufferSizeMb=16
#
# Target number of indexes and deltas in the overall index and what index size to merge in memory
#
lucene.indexer.mergerTargetIndexCount=8
lucene.indexer.mergerTargetOverlayCount=5
lucene.indexer.mergerTargetOverlaysBlockingFactor=2
lucene.indexer.maxDocsForInMemoryMerge=60000
lucene.indexer.maxRamInMbForInMemoryMerge=16
lucene.indexer.maxDocsForInMemoryIndex=60000
lucene.indexer.maxRamInMbForInMemoryIndex=16
#
# Other lucene properties
#
lucene.indexer.termIndexInterval=128
lucene.indexer.useNioMemoryMapping=true
# over-ride to false for pre 3.0 behaviour
lucene.indexer.postSortDateTime=true
lucene.indexer.defaultMLIndexAnalysisMode=EXACT_LANGUAGE_AND_ALL
lucene.indexer.defaultMLSearchAnalysisMode=EXACT_LANGUAGE_AND_ALL
#
# The number of terms from a document that will be indexed
#
lucene.indexer.maxFieldLength=10000

# Should we use a 'fair' locking policy, giving queue-like access behaviour to
# the indexes and avoiding starvation of waiting writers? Set to false on old
# JVMs where this appears to cause deadlock
lucene.indexer.fairLocking=true

#
# Index locks (mostly deprecated and will be tidied up with the next lucene upgrade)
#
lucene.write.lock.timeout=10000
lucene.commit.lock.timeout=100000
lucene.lock.poll.interval=100

# When transforming archive files (.zip etc) into text representations (such as
# for full text indexing), should the files within the archive be processed too?
# If enabled, transformation takes longer, but searches of the files find more.
transformer.Archive.includeContents=false

# Database configuration
db.schema.stopAfterSchemaBootstrap=false
db.schema.update=true
db.schema.update.lockRetryCount=24
db.schema.update.lockRetryWaitSeconds=5
# NOTE(review): org.gjt.mm.mysql.Driver is the legacy MM.MySQL class name kept as a
# compatibility alias by MySQL Connector/J - confirm the deployed driver still provides it.
db.driver=org.gjt.mm.mysql.Driver
db.name=alfresco
db.url=jdbc:mysql:///${db.name}
# NOTE(review): plain-text default credentials - override these per environment
# rather than shipping real values in this file.
db.username=alfresco
db.password=alfresco
db.pool.initial=10
db.pool.max=40
# -1 means use the driver's default transaction isolation level
db.txn.isolation=-1
db.pool.statements.enable=true
db.pool.statements.max=40
db.pool.min=0
db.pool.idle=-1
db.pool.wait.max=-1
db.pool.validate.query=
db.pool.evict.interval=-1
db.pool.evict.idle.min=1800000
db.pool.validate.borrow=true
db.pool.validate.return=false
db.pool.evict.validate=false
#
db.pool.abandoned.detect=false
db.pool.abandoned.time=300
#
# db.pool.abandoned.log=true (logAbandoned) adds overhead (http://commons.apache.org/dbcp/configuration.html)
# and also requires db.pool.abandoned.detect=true (removeAbandoned)
#
db.pool.abandoned.log=false



# Audit configuration
audit.enabled=true
audit.tagging.enabled=true
audit.alfresco-access.enabled=false
audit.alfresco-access.sub-events.enabled=false
audit.cmischangelog.enabled=false
audit.dod5015.enabled=false
# Setting this flag to true will force startup failure when invalid audit configurations are detected
audit.config.strict=false
# Audit map filter for AccessAuditor - restricts recorded events to user driven events
# (';'-separated match expressions; a leading '~' negates the expression)
audit.filter.alfresco-access.default.enabled=true
audit.filter.alfresco-access.transaction.user=~System;~null;.*
audit.filter.alfresco-access.transaction.type=cm:folder;cm:content;st:site
audit.filter.alfresco-access.transaction.path=~/sys:archivedItem;~/ver:;.*



# System Configuration
system.store=system://system
system.descriptor.childname=sys:descriptor
system.descriptor.current.childname=sys:descriptor-current
# User config
alfresco_user_store.store=user://alfrescoUserStore
alfresco_user_store.system_container.childname=sys:system
alfresco_user_store.user_container.childname=sys:people

# note: default admin username - should not be changed after installation
alfresco_user_store.adminusername=admin

# Initial password - editing this will not have any effect once the repository is installed
# NOTE(review): the value looks like a stored password hash rather than plain text - confirm
# the expected format before changing it.
alfresco_user_store.adminpassword=209c6174da490caeb422f3fa5a7ae634

# note: default guest username - should not be changed after installation
alfresco_user_store.guestusername=guest

# Used to move home folders to a new location
home_folder_provider_synchronizer.enabled=false
home_folder_provider_synchronizer.override_provider=
home_folder_provider_synchronizer.keep_empty_parents=false

# Spaces Archive Configuration
# Store used for archived (deleted) nodes
spaces.archive.store=archive://SpacesStore

# Spaces Configuration
# Main workspace store and the child names of the standard repository folders.
spaces.store=workspace://SpacesStore
spaces.company_home.childname=app:company_home
spaces.guest_home.childname=app:guest_home
spaces.dictionary.childname=app:dictionary
spaces.templates.childname=app:space_templates
spaces.imapConfig.childname=app:imap_configs
spaces.imap_templates.childname=app:imap_templates
# note: this value intentionally contains a space
spaces.scheduled_actions.childname=cm:Scheduled Actions
spaces.emailActions.childname=app:email_actions
spaces.searchAction.childname=cm:search
spaces.templates.content.childname=app:content_templates
spaces.templates.email.childname=app:email_templates
spaces.templates.email.invite1.childname=app:invite_email_templates
spaces.templates.email.notify.childname=app:notify_email_templates
spaces.templates.rss.childname=app:rss_templates
spaces.savedsearches.childname=app:saved_searches
spaces.scripts.childname=app:scripts
spaces.wcm.childname=app:wcm
spaces.wcm_content_forms.childname=app:wcm_forms
spaces.content_forms.childname=app:forms
spaces.user_homes.childname=app:user_homes
# Optional regex settings applied to the user name when locating home folders
# (pattern and group order are empty by default)
spaces.user_homes.regex.key=userName
spaces.user_homes.regex.pattern=
spaces.user_homes.regex.group_order=
spaces.sites.childname=st:sites
spaces.templates.email.invite.childname=cm:invite
spaces.rendition.rendering_actions.childname=app:rendering_actions
spaces.replication.replication_actions.childname=app:replication_actions
spaces.wcm_deployed.childname=cm:wcm_deployed
spaces.transfers.childname=app:transfers
spaces.transfer_groups.childname=app:transfer_groups
spaces.transfer_temp.childname=app:temp
spaces.inbound_transfer_records.childname=app:inbound_transfer_records
spaces.models.childname=app:models
spaces.workflow.definitions.childname=app:workflow_defs
spaces.webscripts.childname=cm:webscripts
spaces.extension_webscripts.childname=cm:extensionwebscripts



# ADM VersionStore Configuration
version.store.enableAutoVersioning=true
version.store.deprecated.lightWeightVersionStore=workspace://lightWeightVersionStore
version.store.version2Store=workspace://version2Store

version.store.migrateVersionStore.threadCount=3
version.store.migrateVersionStore.batchSize=1

version.store.migrateCleanupJob.threadCount=3
version.store.migrateCleanupJob.batchSize=1



# WARNING: For non-production testing only !!! Do not change (to avoid version store issues,
# including possible mismatch). Should be false since lightWeightVersionStore is deprecated.
version.store.onlyUseDeprecatedV1=false

# The CRON expression to trigger migration of the version store from V1 (2.x) to V2 (3.x)
# By default, this is effectively 'never' but can be modified as required.
#    Examples:
#       Never:                     * * * * * ? 2099
#       Once every thirty minutes: 0 0/30 * * * ?
#       See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html
version.store.migrateVersionStore.cronExpression=* * * * * ? 2099
# Limit number of version histories to migrate per job cycle, where -1 = unlimited.
# Note: if limit > 0 then need to schedule job to run regularly in order to complete the migration.
version.store.migrateVersionStore.limitPerJobCycle=-1
version.store.migrateVersionStore.runAsScheduledJob=false

# Folders for storing people
system.system_container.childname=sys:system
system.people_container.childname=sys:people
system.authorities_container.childname=sys:authorities
system.zones_container.childname=sys:zones

# Folders for storing workflow related info
system.workflow_container.childname=sys:workflow

# Are user names case sensitive?
user.name.caseSensitive=false
domain.name.caseSensitive=false
# Separator between domain and user name (empty by default)
domain.separator=

# AVM Specific properties.
# Idle stream timeout for remote AVM access - presumably milliseconds; confirm against consumer
avm.remote.idlestream.timeout=30000

# Format caption extracted from the XML Schema.
xforms.formatCaption=true

# ECM content usages/quotas
system.usages.enabled=true
system.usages.clearBatchSize=50
system.usages.updateBatchSize=50
# Repository endpoint - used by Activity Service
repo.remote.url=http://localhost:8080/alfresco
repo.remote.endpoint=/service

# Create home folders as people are created (true) or create them lazily (false)
home.folder.creation.eager=true

# Should we consider zero byte content to be the same as no content when firing
# content update policies? Prevents 'premature' firing of inbound content rules
# for some clients such as Mac OS X Finder
policy.content.update.ignoreEmpty=false

# The well known RMI registry port and external host name published in the stubs
# is defined in the alfresco-shared.properties file
#
# alfresco.rmi.services.port=50500

# Default value of alfresco.rmi.services.host is 0.0.0.0 which means 'listen on all adapters'.
# This allows connections to JMX both remotely and locally.
#
alfresco.rmi.services.host=0.0.0.0

# RMI service ports for the individual services.
# These eight services are available remotely.
# NOTE(review): only seven ports are listed below (50503 is absent) - confirm whether a
# service entry was lost or the comment is out of date.
#
# Assign individual ports for each service for best performance
# or run several services on the same port, you can even run everything on 50500 if
# running through a firewall.
#
# Specify 0 to use a random unused port.
#
avm.rmi.service.port=50501
avmsync.rmi.service.port=50502
authentication.rmi.service.port=50504
repo.rmi.service.port=50505
action.rmi.service.port=50506
deployment.rmi.service.port=50507
monitor.rmi.service.port=50508



# Should the Mbean server bind to an existing server. Set to true for most application servers,
# false for WebSphere clusters.
mbean.server.locateExistingServerIfPossible=true

# External executable locations
ooo.exe=soffice
ooo.user=${dir.root}/oouser
img.root=./ImageMagick
img.dyn=${img.root}/lib
img.exe=${img.root}/bin/convert
swf.exe=./bin/pdf2swf

# Configuration for handling of failing thumbnails.
# See NodeEligibleForRethumbnailingEvaluator's javadoc for details.
#
# Retry periods limit the frequency with which the repository will attempt to create Share thumbnails
# for content nodes which have previously failed in their thumbnail attempts.
# These periods are in seconds.
#
# 604800s = 60s * 60m * 24h * 7d = 1 week
system.thumbnail.retryPeriod=60
system.thumbnail.retryCount=2
system.thumbnail.quietPeriod=604800
system.thumbnail.quietPeriodRetriesEnabled=true

# Property to enable upgrade from 2.1-A
V2.1-A.fixes.to.schema=0
#V2.1-A.fixes.to.schema=82

# The default authentication chain
authentication.chain=alfrescoNtlm1:alfrescoNtlm

# Do authentication tickets expire or live for ever?
authentication.ticket.ticketsExpire=false

# If ticketsExpire is true then how should they expire?
# Valid values are: AFTER_INACTIVITY, AFTER_FIXED_TIME, DO_NOT_EXPIRE
# The default is AFTER_FIXED_TIME
authentication.ticket.expiryMode=AFTER_FIXED_TIME

# If authentication.ticket.ticketsExpire is true and
# authentication.ticket.expiryMode is AFTER_FIXED_TIME or AFTER_INACTIVITY,
# this controls the minimum period for which tickets are valid.
# The default is PT1H for one hour (ISO-8601 duration notation).
authentication.ticket.validDuration=PT1H

# Default NFS user mappings (empty). Note these users will be able to
# authenticate through NFS without password so ensure NFS port is secure before
# enabling and adding mappings
nfs.enabled=false
nfs.user.mappings=
nfs.user.mappings.default.uid=0
nfs.user.mappings.default.gid=0

# Example NFS user mappings
#nfs.user.mappings=admin,user1
#nfs.user.mappings.value.admin.uid=0
#nfs.user.mappings.value.admin.gid=0
#nfs.user.mappings.value.user1.uid=500
#nfs.user.mappings.value.user1.gid=500

# IMAP
imap.server.enabled=false
# Standard (non-SSL) IMAP port
imap.server.port=143
imap.server.attachments.extraction.enabled=true

# Default IMAP mount points
imap.config.home.store=${spaces.store}
imap.config.home.rootPath=/${spaces.company_home.childname}
imap.config.home.folderPath=Imap Home
# Comma-separated list of mount point names; per-mount settings follow below
imap.config.server.mountPoints=AlfrescoIMAP
imap.config.server.mountPoints.default.mountPointName=IMAP
imap.config.server.mountPoints.default.modeName=ARCHIVE
imap.config.server.mountPoints.default.store=${spaces.store}
imap.config.server.mountPoints.default.rootPath=/${spaces.company_home.childname}
imap.config.server.mountPoints.value.AlfrescoIMAP.mountPointName=Alfresco IMAP
imap.config.server.mountPoints.value.AlfrescoIMAP.modeName=MIXED

# Activity feed max size and max age (eg. 44640 mins = 31 days)
activities.feed.max.size=100
activities.feed.max.age.mins=44640

# Subsystem unit test values. Will not have any effect on production servers
subsystems.test.beanProp.default.longProperty=123456789123456789
subsystems.test.beanProp.default.anotherStringProperty=Global Default
subsystems.test.beanProp=inst1,inst2,inst3
subsystems.test.beanProp.value.inst2.boolProperty=true
subsystems.test.beanProp.value.inst3.anotherStringProperty=Global Instance Default
subsystems.test.simpleProp2=true
subsystems.test.simpleProp3=Global Default3

# Deployment Service
deployment.service.numberOfSendingThreads=5
deployment.service.corePoolSize=2
deployment.service.maximumPoolSize=3
# How long to wait in mS before refreshing a target lock - detects shutdown servers
deployment.service.targetLockRefreshTime=60000
# How long to wait in mS from the last communication before deciding that deployment has failed,
# possibly the destination is no longer available?
deployment.service.targetLockTimeout=3600000

# Invitation Service
# Should send emails as part of invitation process.
notification.email.siteinvite=true

# Transfer Service
transferservice.receiver.enabled=true
transferservice.receiver.stagingDir=${java.io.tmpdir}/alfresco-transfer-staging
#
# How long to wait in mS before refreshing a transfer lock - detects shutdown servers
# Default 1 minute.
transferservice.receiver.lockRefreshTime=60000
#
# How many times to attempt retry the transfer lock
transferservice.receiver.lockRetryCount=3
# How long to wait, in mS, before retrying the transfer lock
transferservice.receiver.lockRetryWait=100
#
# How long to wait, in mS, since the last contact with the client before
# timing out a transfer. Needs to be long enough to cope with network delays and
# "thinking time" for both source and destination. Default 5 minutes.
transferservice.receiver.lockTimeOut=300000

# DM Receiver Properties
# (comments below converted from ';' to '#': java.util.Properties only treats
# '#' and '!' as comment markers, so ';' lines would be parsed as bogus keys)
#
# The name of the DM Receiver target - you deploy to this target name
deployment.dmr.name=alfresco

# consolidate staging, author and workflow sandboxes to one
deployment.dmr.consolidate=true
# The name of the Alfresco receiver target
deployment.avm.name=avm

# Where should the root of the web project be stored, by default /www/avm_webapps
deployment.avm.rootPath=/www/avm_webapps

# Pattern for live stores deployment by the alfresco receiver
deployment.avm.storeNamePattern=%storeName%-live

# Built in deployment receiver properties for the default
# filesystem receiver

# filesystem receiver configuration
deployment.filesystem.rootdir=./wcm
deployment.filesystem.datadir=${deployment.filesystem.rootdir}/depdata
deployment.filesystem.logdir=${deployment.filesystem.rootdir}/deplog
deployment.filesystem.metadatadir=${deployment.filesystem.rootdir}/depmetadata

deployment.filesystem.autofix=true
deployment.filesystem.errorOnOverwrite=false

# default filesystem target configuration
deployment.filesystem.default.rootdir=./www
deployment.filesystem.default.name=filesystem
deployment.filesystem.default.metadatadir=${deployment.filesystem.metadatadir}/default

More Related Content

What's hot

Kafka replication apachecon_2013
Kafka replication apachecon_2013Kafka replication apachecon_2013
Kafka replication apachecon_2013
Jun Rao
 
In-memory Caching in HDFS: Lower Latency, Same Great Taste
In-memory Caching in HDFS: Lower Latency, Same Great TasteIn-memory Caching in HDFS: Lower Latency, Same Great Taste
In-memory Caching in HDFS: Lower Latency, Same Great Taste
DataWorks Summit
 

What's hot (20)

HBaseCon 2015: HBase Performance Tuning @ Salesforce
HBaseCon 2015: HBase Performance Tuning @ SalesforceHBaseCon 2015: HBase Performance Tuning @ Salesforce
HBaseCon 2015: HBase Performance Tuning @ Salesforce
 
Using Apache Spark as ETL engine. Pros and Cons
Using Apache Spark as ETL engine. Pros and Cons          Using Apache Spark as ETL engine. Pros and Cons
Using Apache Spark as ETL engine. Pros and Cons
 
AWS Aurora 운영사례 (by 배은미)
AWS Aurora 운영사례 (by 배은미)AWS Aurora 운영사례 (by 배은미)
AWS Aurora 운영사례 (by 배은미)
 
How to Actually Tune Your Spark Jobs So They Work
How to Actually Tune Your Spark Jobs So They WorkHow to Actually Tune Your Spark Jobs So They Work
How to Actually Tune Your Spark Jobs So They Work
 
Firebird's Big Databases (in English)
Firebird's Big Databases (in English)Firebird's Big Databases (in English)
Firebird's Big Databases (in English)
 
Lessons from the Field: Applying Best Practices to Your Apache Spark Applicat...
Lessons from the Field: Applying Best Practices to Your Apache Spark Applicat...Lessons from the Field: Applying Best Practices to Your Apache Spark Applicat...
Lessons from the Field: Applying Best Practices to Your Apache Spark Applicat...
 
Tanel Poder - Troubleshooting Complex Oracle Performance Issues - Part 1
Tanel Poder - Troubleshooting Complex Oracle Performance Issues - Part 1Tanel Poder - Troubleshooting Complex Oracle Performance Issues - Part 1
Tanel Poder - Troubleshooting Complex Oracle Performance Issues - Part 1
 
Kafka replication apachecon_2013
Kafka replication apachecon_2013Kafka replication apachecon_2013
Kafka replication apachecon_2013
 
Dependency Injection in Apache Spark Applications
Dependency Injection in Apache Spark ApplicationsDependency Injection in Apache Spark Applications
Dependency Injection in Apache Spark Applications
 
Hadoop World 2011: Hadoop Troubleshooting 101 - Kate Ting - Cloudera
Hadoop World 2011: Hadoop Troubleshooting 101 - Kate Ting - ClouderaHadoop World 2011: Hadoop Troubleshooting 101 - Kate Ting - Cloudera
Hadoop World 2011: Hadoop Troubleshooting 101 - Kate Ting - Cloudera
 
Gfs vs hdfs
Gfs vs hdfsGfs vs hdfs
Gfs vs hdfs
 
Making MySQL highly available using Oracle Grid Infrastructure
Making MySQL highly available using Oracle Grid InfrastructureMaking MySQL highly available using Oracle Grid Infrastructure
Making MySQL highly available using Oracle Grid Infrastructure
 
Introducing Galera 3.0
Introducing Galera 3.0Introducing Galera 3.0
Introducing Galera 3.0
 
Intro to the Alfresco Public API
Intro to the Alfresco Public APIIntro to the Alfresco Public API
Intro to the Alfresco Public API
 
Deep Dive on Amazon Aurora MySQL Performance Tuning (DAT429-R1) - AWS re:Inve...
Deep Dive on Amazon Aurora MySQL Performance Tuning (DAT429-R1) - AWS re:Inve...Deep Dive on Amazon Aurora MySQL Performance Tuning (DAT429-R1) - AWS re:Inve...
Deep Dive on Amazon Aurora MySQL Performance Tuning (DAT429-R1) - AWS re:Inve...
 
A Closer Look at Apache Kudu
A Closer Look at Apache KuduA Closer Look at Apache Kudu
A Closer Look at Apache Kudu
 
Data discovery & metadata management (amundsen installation)
Data discovery & metadata management (amundsen installation)Data discovery & metadata management (amundsen installation)
Data discovery & metadata management (amundsen installation)
 
Bloat and Fragmentation in PostgreSQL
Bloat and Fragmentation in PostgreSQLBloat and Fragmentation in PostgreSQL
Bloat and Fragmentation in PostgreSQL
 
In-memory Caching in HDFS: Lower Latency, Same Great Taste
In-memory Caching in HDFS: Lower Latency, Same Great TasteIn-memory Caching in HDFS: Lower Latency, Same Great Taste
In-memory Caching in HDFS: Lower Latency, Same Great Taste
 
Everything You Always Wanted to Know About Kafka’s Rebalance Protocol but Wer...
Everything You Always Wanted to Know About Kafka’s Rebalance Protocol but Wer...Everything You Always Wanted to Know About Kafka’s Rebalance Protocol but Wer...
Everything You Always Wanted to Know About Kafka’s Rebalance Protocol but Wer...
 

Similar to alfresco-global.properties-COMPLETO-3.4.6

X64服务器 lnmp服务器部署标准 new
X64服务器 lnmp服务器部署标准 newX64服务器 lnmp服务器部署标准 new
X64服务器 lnmp服务器部署标准 new
Yiwei Ma
 
Penetration Testing for Easy RM to MP3 Converter Application and Post Exploit
Penetration Testing for Easy RM to MP3 Converter Application and Post ExploitPenetration Testing for Easy RM to MP3 Converter Application and Post Exploit
Penetration Testing for Easy RM to MP3 Converter Application and Post Exploit
JongWon Kim
 
PuppetDB: Sneaking Clojure into Operations
PuppetDB: Sneaking Clojure into OperationsPuppetDB: Sneaking Clojure into Operations
PuppetDB: Sneaking Clojure into Operations
grim_radical
 

Similar to alfresco-global.properties-COMPLETO-3.4.6 (20)

Kafka and kafka connect
Kafka and kafka connectKafka and kafka connect
Kafka and kafka connect
 
GUC Tutorial Package (9.0)
GUC Tutorial Package (9.0)GUC Tutorial Package (9.0)
GUC Tutorial Package (9.0)
 
X64服务器 lnmp服务器部署标准 new
X64服务器 lnmp服务器部署标准 newX64服务器 lnmp服务器部署标准 new
X64服务器 lnmp服务器部署标准 new
 
Squid3
Squid3Squid3
Squid3
 
Squid3
Squid3Squid3
Squid3
 
Introduction to Apache Mesos
Introduction to Apache MesosIntroduction to Apache Mesos
Introduction to Apache Mesos
 
Penetration Testing for Easy RM to MP3 Converter Application and Post Exploit
Penetration Testing for Easy RM to MP3 Converter Application and Post ExploitPenetration Testing for Easy RM to MP3 Converter Application and Post Exploit
Penetration Testing for Easy RM to MP3 Converter Application and Post Exploit
 
Httpd.conf
Httpd.confHttpd.conf
Httpd.conf
 
Ansible: How to Get More Sleep and Require Less Coffee
Ansible: How to Get More Sleep and Require Less CoffeeAnsible: How to Get More Sleep and Require Less Coffee
Ansible: How to Get More Sleep and Require Less Coffee
 
Writing & Sharing Great Modules on the Puppet Forge
Writing & Sharing Great Modules on the Puppet ForgeWriting & Sharing Great Modules on the Puppet Forge
Writing & Sharing Great Modules on the Puppet Forge
 
Cloud patterns applied
Cloud patterns appliedCloud patterns applied
Cloud patterns applied
 
Symfony finally swiped right on envvars
Symfony finally swiped right on envvarsSymfony finally swiped right on envvars
Symfony finally swiped right on envvars
 
Null Bachaav - May 07 Attack Monitoring workshop.
Null Bachaav - May 07 Attack Monitoring workshop.Null Bachaav - May 07 Attack Monitoring workshop.
Null Bachaav - May 07 Attack Monitoring workshop.
 
Apache Flink Hands On
Apache Flink Hands OnApache Flink Hands On
Apache Flink Hands On
 
linux_Commands
linux_Commandslinux_Commands
linux_Commands
 
PuppetDB: Sneaking Clojure into Operations
PuppetDB: Sneaking Clojure into OperationsPuppetDB: Sneaking Clojure into Operations
PuppetDB: Sneaking Clojure into Operations
 
Cloud Meetup - Automation in the Cloud
Cloud Meetup - Automation in the CloudCloud Meetup - Automation in the Cloud
Cloud Meetup - Automation in the Cloud
 
PostgreSQL High_Performance_Cheatsheet
PostgreSQL High_Performance_CheatsheetPostgreSQL High_Performance_Cheatsheet
PostgreSQL High_Performance_Cheatsheet
 
An Ensemble Core with Docker - Solving a Real Pain in the PaaS
An Ensemble Core with Docker - Solving a Real Pain in the PaaS An Ensemble Core with Docker - Solving a Real Pain in the PaaS
An Ensemble Core with Docker - Solving a Real Pain in the PaaS
 
Unlocked Nov 2013: Main Slide Pack
Unlocked Nov 2013: Main Slide PackUnlocked Nov 2013: Main Slide Pack
Unlocked Nov 2013: Main Slide Pack
 

alfresco-global.properties-COMPLETO-3.4.6

  • 1. # Repository configuration repository.name=Main Repository # Directory configuration dir.root=./alf_data dir.contentstore=${dir.root}/contentstore dir.contentstore.deleted=${dir.root}/contentstore.deleted dir.auditcontentstore=${dir.root}/audit.contentstore # The location for lucene index files dir.indexes=${dir.root}/lucene-indexes # The location for index backups dir.indexes.backup=${dir.root}/backup-lucene-indexes # The location for lucene index locks dir.indexes.lock=${dir.indexes}/locks # Is the JBPM Deploy Process Servlet enabled? # Default is false. Should not be enabled in production environments as the # servlet allows unauthenticated deployment of new workflows. system.workflow.deployservlet.enabled=false # Sets the location for the JBPM Configuration File system.workflow.jbpm.config.location=classpath:org/alfresco/repo/workflow/jbpm/jbpm.cfg.xml # ######################################### # # Index Recovery and Tracking Configuration # # ######################################### # # # Recovery types are: # NONE: Ignore # VALIDATE: Checks that the first and last transaction for each store is represented in the indexes # AUTO: Validates and auto-recovers if validation fails # FULL: Full index rebuild, processing all transactions in order. The server is temporarily suspended. index.recovery.mode=VALIDATE # FULL recovery continues when encountering errors index.recovery.stopOnError=false index.recovery.maximumPoolSize=5 # Set the frequency with which the index tracking is triggered. # For more information on index tracking in a cluster: # http://wiki.alfresco.com/wiki/High_Availability_Configuration_V1.4_to_V2.1#Version_1.4.5.2C_2.1.1_and_later # By default, this is effectively never, but can be modified as required. # Examples: # Never: * * * * * ? 2099 # Once every five seconds: 0/5 * * * * ? # Once every two seconds : 0/2 * * * * ? # See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html index.tracking.cronExpression=0/5 * * * * ? 
index.tracking.adm.cronExpression=${index.tracking.cronExpression} index.tracking.avm.cronExpression=${index.tracking.cronExpression}
  • 2. # Other properties. index.tracking.maxTxnDurationMinutes=10 index.tracking.reindexLagMs=1000 index.tracking.maxRecordSetSize=1000 index.tracking.maxTransactionsPerLuceneCommit=100 index.tracking.disableInTransactionIndexing=false # Index tracking information of a certain age is cleaned out by a scheduled job. # Any clustered system that has been offline for longer than this period will need to be seeded # with a more recent backup of the Lucene indexes or the indexes will have to be fully rebuilt. # Use -1 to disable purging. This can be switched on at any stage. index.tracking.minRecordPurgeAgeDays=30 # Reindexing of missing content is by default 'never' carried out. # The cron expression below can be changed to control the timing of this reindexing. # Users of Enterprise Alfresco can configure this cron expression via JMX without a server restart. # Note that if alfresco.cluster.name is not set, then reindexing will not occur. index.reindexMissingContent.cronExpression=* * * * * ? 2099 # Change the failure behaviour of the configuration checker system.bootstrap.config_check.strict=true # The name of the cluster # Leave this empty to disable cluster entry alfresco.cluster.name= # The EHCache RMI peer URL addresses to set in the ehcache-custom.xml file # Use this property to set the hostname of the current server. # This is only necessary if the cache peer URLs are generated with an invalid IP address for the local server. alfresco.ehcache.rmi.hostname= # Use this property to set the cache peer URL port. alfresco.ehcache.rmi.remoteObjectPort=0 alfresco.ehcache.rmi.port=0 alfresco.ehcache.rmi.socketTimeoutMillis=5000 # The protocol stack to use from the JGroups configuration file # Use this property to select which communication method should be used. 
# The JGroups configuration file is built up using the protocol string alfresco.jgroups.defaultProtocol=UDP # The bind address and interface for JGroups to use; equivalent to -Djgroups.bind_addr and -Djgroups.bind_interface alfresco.jgroups.bind_address= alfresco.jgroups.bind_interface= # JGroups configuration (http://www.jgroups.org) # The location of the JGroups configuration file alfresco.jgroups.configLocation=classpath:alfresco/jgroups/alfresco-jgroups-${alfresco.jgroups.defaultProtocol}.xml # # How long should shutdown wait to complete normally before # taking stronger action and calling System.exit() # in ms, 10,000 is 10 seconds # shutdown.backstop.timeout=10000 shutdown.backstop.enabled=false
  • 3. # Server Single User Mode # note: # only allow named user (note: if blank or not set then will allow all users) # assuming maxusers is not set to 0 #server.singleuseronly.name=admin # Server Max Users - limit number of users with non-expired tickets # note: # -1 allows any number of users, assuming not in single-user mode # 0 prevents further logins, including the ability to enter single-user mode server.maxusers=-1 # The Cron expression controlling the frequency with which the OpenOffice connection is tested openOffice.test.cronExpression=0 * * * * ? # # Disable all shared caches (mutable and immutable) # These properties are used for diagnostic purposes system.cache.disableMutableSharedCaches=false system.cache.disableImmutableSharedCaches=false # # Properties to limit resources spent on individual searches # # The maximum time spent pruning results system.acl.maxPermissionCheckTimeMillis=10000 # The maximum number of results to perform permission checks against system.acl.maxPermissionChecks=1000 # Properties to control read permission evaluation for acegi system.readpermissions.optimise=true system.readpermissions.bulkfetchsize=1000 # # Manually control how the system handles maximum string lengths. # Any zero or negative value is ignored. # Only change this after consulting support or reading the appropriate Javadocs for # org.alfresco.repo.domain.schema.SchemaBootstrap for V2.1.2 system.maximumStringLength=-1 # # Limit hibernate session size by trying to amalgamate events for the L2 session invalidation # - hibernate works as is up to this size # - after the limit is hit events that can be grouped invalidate the L2 cache by type and not instance # events may not group if there are post action listener registered (this is not the case with the default distribution) system.hibernateMaxExecutions=20000 # # Determine if modification timestamp propagation from child to parent nodes is respected or not. 
# Even if 'true', the functionality is only supported for child associations that declare the # 'propagateTimestamps' element in the dictionary definition. system.enableTimestampPropagation=true
  • 4. # # Decide if content should be removed from the system immediately after being orphaned. # Do not change this unless you have examined the impact it has on your backup procedures. system.content.eagerOrphanCleanup=false # The number of days to keep orphaned content in the content stores. # This has no effect on the 'deleted' content stores, which are not automatically emptied. system.content.orphanProtectDays=14 # The action to take when a store or stores fails to delete orphaned content # IGNORE: Just log a warning. The binary remains and the record is expunged # KEEP_URL: Log a warning and create a URL entry with orphan time 0. It won't be processed or removed. system.content.deletionFailureAction=IGNORE # The CRON expression to trigger the deletion of resources associated with orphaned content. system.content.orphanCleanup.cronExpression=0 0 4 * * ? # The CRON expression to trigger content URL conversion. This process is not intesive and can # be triggered on a live system. Similarly, it can be triggered using JMX on a dedicated machine. system.content.contentUrlConverter.cronExpression=* * * * * ? 2099 system.content.contentUrlConverter.threadCount=2 system.content.contentUrlConverter.batchSize=500 system.content.contentUrlConverter.runAsScheduledJob=false # #################### # # Lucene configuration # # #################### # # # Millisecond threshold for text transformations # Slower transformers will force the text extraction to be asynchronous # lucene.maxAtomicTransformationTime=20 # # The maximum number of clauses that are allowed in a lucene query # lucene.query.maxClauses=10000 # # The size of the queue of nodes waiting for index # Events are generated as nodes are changed, this is the maximum size of the queue used to coalesce event # When this size is reached the lists of nodes will be indexed # # http://issues.alfresco.com/browse/AR-1280: Setting this high is the workaround as of 1.4.3. 
# lucene.indexer.batchSize=1000000 fts.indexer.batchSize=1000 # # Index cache sizes # lucene.indexer.cacheEnabled=true lucene.indexer.maxDocIdCacheSize=100000 lucene.indexer.maxDocumentCacheSize=100 lucene.indexer.maxIsCategoryCacheSize=-1 lucene.indexer.maxLinkAspectCacheSize=10000 lucene.indexer.maxParentCacheSize=100000 lucene.indexer.maxPathCacheSize=100000 lucene.indexer.maxTypeCacheSize=10000
  • 5. # # Properties for merge (not this does not affect the final index segment which will be optimised) # Max merge docs only applies to the merge process not the resulting index which will be optimised. # lucene.indexer.mergerMaxMergeDocs=1000000 lucene.indexer.mergerMergeFactor=5 lucene.indexer.mergerMaxBufferedDocs=-1 lucene.indexer.mergerRamBufferSizeMb=16 # # Properties for delta indexes (not this does not affect the final index segment which will be optimised) # Max merge docs only applies to the index building process not the resulting index which will be optimised. # lucene.indexer.writerMaxMergeDocs=1000000 lucene.indexer.writerMergeFactor=5 lucene.indexer.writerMaxBufferedDocs=-1 lucene.indexer.writerRamBufferSizeMb=16 # # Target number of indexes and deltas in the overall index and what index size to merge in memory # lucene.indexer.mergerTargetIndexCount=8 lucene.indexer.mergerTargetOverlayCount=5 lucene.indexer.mergerTargetOverlaysBlockingFactor=2 lucene.indexer.maxDocsForInMemoryMerge=60000 lucene.indexer.maxRamInMbForInMemoryMerge=16 lucene.indexer.maxDocsForInMemoryIndex=60000 lucene.indexer.maxRamInMbForInMemoryIndex=16 # # Other lucene properties # lucene.indexer.termIndexInterval=128 lucene.indexer.useNioMemoryMapping=true # over-ride to false for pre 3.0 behaviour lucene.indexer.postSortDateTime=true lucene.indexer.defaultMLIndexAnalysisMode=EXACT_LANGUAGE_AND_ALL lucene.indexer.defaultMLSearchAnalysisMode=EXACT_LANGUAGE_AND_ALL # # The number of terms from a document that will be indexed # lucene.indexer.maxFieldLength=10000 # Should we use a 'fair' locking policy, giving queue-like access behaviour to # the indexes and avoiding starvation of waiting writers? 
Set to false on old # JVMs where this appears to cause deadlock lucene.indexer.fairLocking=true # # Index locks (mostly deprecated and will be tidied up with the next lucene upgrade) # lucene.write.lock.timeout=10000 lucene.commit.lock.timeout=100000 lucene.lock.poll.interval=100 # When transforming archive files (.zip etc) into text representations (such as # for full text indexing), should the files within the archive be processed too?
  • 6. # If enabled, transformation takes longer, but searches of the files find more. transformer.Archive.includeContents=false # Database configuration db.schema.stopAfterSchemaBootstrap=false db.schema.update=true db.schema.update.lockRetryCount=24 db.schema.update.lockRetryWaitSeconds=5 db.driver=org.gjt.mm.mysql.Driver db.name=alfresco db.url=jdbc:mysql:///${db.name} db.username=alfresco db.password=alfresco db.pool.initial=10 db.pool.max=40 db.txn.isolation=-1 db.pool.statements.enable=true db.pool.statements.max=40 db.pool.min=0 db.pool.idle=-1 db.pool.wait.max=-1 db.pool.validate.query= db.pool.evict.interval=-1 db.pool.evict.idle.min=1800000 db.pool.validate.borrow=true db.pool.validate.return=false db.pool.evict.validate=false # db.pool.abandoned.detect=false db.pool.abandoned.time=300 # # db.pool.abandoned.log=true (logAbandoned) adds overhead (http://commons.apache.org/dbcp/ configuration.html) # and also requires db.pool.abandoned.detect=true (removeAbandoned) # db.pool.abandoned.log=false # Audit configuration audit.enabled=true audit.tagging.enabled=true audit.alfresco-access.enabled=false audit.alfresco-access.sub-events.enabled=false audit.cmischangelog.enabled=false audit.dod5015.enabled=false # Setting this flag to true will force startup failure when invalid audit configurations are detected audit.config.strict=false # Audit map filter for AccessAuditor - restricts recorded events to user driven events audit.filter.alfresco-access.default.enabled=true audit.filter.alfresco-access.transaction.user=~System;~null;.* audit.filter.alfresco-access.transaction.type=cm:folder;cm:content;st:site audit.filter.alfresco-access.transaction.path=~/sys:archivedItem;~/ver:;.* # System Configuration system.store=system://system system.descriptor.childname=sys:descriptor system.descriptor.current.childname=sys:descriptor-current
  • 7. # User config alfresco_user_store.store=user://alfrescoUserStore alfresco_user_store.system_container.childname=sys:system alfresco_user_store.user_container.childname=sys:people # note: default admin username - should not be changed after installation alfresco_user_store.adminusername=admin # Initial password - editing this will not have any effect once the repository is installed alfresco_user_store.adminpassword=209c6174da490caeb422f3fa5a7ae634 # note: default guest username - should not be changed after installation alfresco_user_store.guestusername=guest # Used to move home folders to a new location home_folder_provider_synchronizer.enabled=false home_folder_provider_synchronizer.override_provider= home_folder_provider_synchronizer.keep_empty_parents=false # Spaces Archive Configuration spaces.archive.store=archive://SpacesStore # Spaces Configuration spaces.store=workspace://SpacesStore spaces.company_home.childname=app:company_home spaces.guest_home.childname=app:guest_home spaces.dictionary.childname=app:dictionary spaces.templates.childname=app:space_templates spaces.imapConfig.childname=app:imap_configs spaces.imap_templates.childname=app:imap_templates spaces.scheduled_actions.childname=cm:Scheduled Actions spaces.emailActions.childname=app:email_actions spaces.searchAction.childname=cm:search spaces.templates.content.childname=app:content_templates spaces.templates.email.childname=app:email_templates spaces.templates.email.invite1.childname=app:invite_email_templates spaces.templates.email.notify.childname=app:notify_email_templates spaces.templates.rss.childname=app:rss_templates spaces.savedsearches.childname=app:saved_searches spaces.scripts.childname=app:scripts spaces.wcm.childname=app:wcm spaces.wcm_content_forms.childname=app:wcm_forms spaces.content_forms.childname=app:forms spaces.user_homes.childname=app:user_homes spaces.user_homes.regex.key=userName spaces.user_homes.regex.pattern= spaces.user_homes.regex.group_order= 
spaces.sites.childname=st:sites spaces.templates.email.invite.childname=cm:invite spaces.rendition.rendering_actions.childname=app:rendering_actions spaces.replication.replication_actions.childname=app:replication_actions spaces.wcm_deployed.childname=cm:wcm_deployed spaces.transfers.childname=app:transfers spaces.transfer_groups.childname=app:transfer_groups spaces.transfer_temp.childname=app:temp spaces.inbound_transfer_records.childname=app:inbound_transfer_records spaces.models.childname=app:models
  • 8. spaces.workflow.definitions.childname=app:workflow_defs spaces.webscripts.childname=cm:webscripts spaces.extension_webscripts.childname=cm:extensionwebscripts # ADM VersionStore Configuration version.store.enableAutoVersioning=true version.store.deprecated.lightWeightVersionStore=workspace://lightWeightVersionStore version.store.version2Store=workspace://version2Store version.store.migrateVersionStore.threadCount=3 version.store.migrateVersionStore.batchSize=1 version.store.migrateCleanupJob.threadCount=3 version.store.migrateCleanupJob.batchSize=1 # WARNING: For non-production testing only !!! Do not change (to avoid version store issues, including possible mismatch). Should be false since lightWeightVersionStore is deprecated. version.store.onlyUseDeprecatedV1=false # The CRON expression to trigger migration of the version store from V1 (2.x) to V2 (3.x) # By default, this is effectively 'never' but can be modified as required. # Examples: # Never: * * * * * ? 2099 # Once every thirty minutes: 0 0/30 * * * ? # See http://www.quartz-scheduler.org/docs/tutorials/crontrigger.html version.store.migrateVersionStore.cronExpression=* * * * * ? 2099 # Limit number of version histories to migrate per job cycle, where -1 = unlimited. Note: if limit > 0 then need to schedule job to run regularly in order to complete the migration. version.store.migrateVersionStore.limitPerJobCycle=-1 version.store.migrateVersionStore.runAsScheduledJob=false # Folders for storing people system.system_container.childname=sys:system system.people_container.childname=sys:people system.authorities_container.childname=sys:authorities system.zones_container.childname=sys:zones # Folders for storing workflow related info system.workflow_container.childname=sys:workflow # Are user names case sensitive? user.name.caseSensitive=false domain.name.caseSensitive=false domain.separator= # AVM Specific properties. avm.remote.idlestream.timeout=30000 #Format caption extracted from the XML Schema. 
xforms.formatCaption=true # ECM content usages/quotas system.usages.enabled=true system.usages.clearBatchSize=50 system.usages.updateBatchSize=50
  • 9. # Repository endpoint - used by Activity Service repo.remote.url=http://localhost:8080/alfresco repo.remote.endpoint=/service # Create home folders as people are created (true) or create them lazily (false) home.folder.creation.eager=true # Should we consider zero byte content to be the same as no content when firing # content update policies? Prevents 'premature' firing of inbound content rules # for some clients such as Mac OS X Finder policy.content.update.ignoreEmpty=false # The well known RMI registry port and external host name published in the stubs # is defined in the alfresco-shared.properties file # # alfresco.rmi.services.port=50500 # Default value of alfresco.rmi.services.host is 0.0.0.0 which means 'listen on all adapters'. # This allows connections to JMX both remotely and locally. # alfresco.rmi.services.host=0.0.0.0 # RMI service ports for the individual services. # These eight services are available remotely. # # Assign individual ports for each service for best performance # or run several services on the same port, you can even run everything on 50500 if # running through a firewall. # # Specify 0 to use a random unused port. # avm.rmi.service.port=50501 avmsync.rmi.service.port=50502 authentication.rmi.service.port=50504 repo.rmi.service.port=50505 action.rmi.service.port=50506 deployment.rmi.service.port=50507 monitor.rmi.service.port=50508 # Should the Mbean server bind to an existing server. Set to true for most application servers. # false for WebSphere clusters. mbean.server.locateExistingServerIfPossible=true # External executable locations ooo.exe=soffice ooo.user=${dir.root}/oouser img.root=./ImageMagick img.dyn=${img.root}/lib img.exe=${img.root}/bin/convert swf.exe=./bin/pdf2swf # Configuration for handling of failing thumbnails. # See NodeEligibleForRethumbnailingEvaluator's javadoc for details. # # Retry periods limit the frequency with which the repository will attempt to create Share
  • 10. thumbnails # for content nodes which have previously failed in their thumbnail attempts. # These periods are in seconds. # # 604800s = 60s * 60m * 24h * 7d = 1 week system.thumbnail.retryPeriod=60 system.thumbnail.retryCount=2 system.thumbnail.quietPeriod=604800 system.thumbnail.quietPeriodRetriesEnabled=true # Property to enable upgrade from 2.1-A V2.1-A.fixes.to.schema=0 #V2.1-A.fixes.to.schema=82 # The default authentication chain authentication.chain=alfrescoNtlm1:alfrescoNtlm # Do authentication tickets expire or live for ever? authentication.ticket.ticketsExpire=false # If ticketsEpire is true then how they should expire? # Valid values are: AFTER_INACTIVITY, AFTER_FIXED_TIME, DO_NOT_EXPIRE # The default is AFTER_FIXED_TIME authentication.ticket.expiryMode=AFTER_FIXED_TIME # If authentication.ticket.ticketsExpire is true and # authentication.ticket.expiryMode is AFTER_FIXED_TIME or AFTER_INACTIVITY, # this controls the minimum period for which tickets are valid. # The default is PT1H for one hour. authentication.ticket.validDuration=PT1H # Default NFS user mappings (empty). 
Note these users will be able to # authenticate through NFS without password so ensure NFS port is secure before # enabling and adding mappings nfs.enabled=false nfs.user.mappings= nfs.user.mappings.default.uid=0 nfs.user.mappings.default.gid=0 #Example NFS user mappings #nfs.user.mappings=admin,user1 #nfs.user.mappings.value.admin.uid=0 #nfs.user.mappings.value.admin.gid=0 #nfs.user.mappings.value.user1.uid=500 #nfs.user.mappings.value.user1.gid=500 # IMAP imap.server.enabled=false imap.server.port=143 imap.server.attachments.extraction.enabled=true # Default IMAP mount points imap.config.home.store=${spaces.store} imap.config.home.rootPath=/${spaces.company_home.childname} imap.config.home.folderPath=Imap Home imap.config.server.mountPoints=AlfrescoIMAP imap.config.server.mountPoints.default.mountPointName=IMAP imap.config.server.mountPoints.default.modeName=ARCHIVE imap.config.server.mountPoints.default.store=${spaces.store}
  • 11. imap.config.server.mountPoints.default.rootPath=/${spaces.company_home.childname} imap.config.server.mountPoints.value.AlfrescoIMAP.mountPointName=Alfresco IMAP imap.config.server.mountPoints.value.AlfrescoIMAP.modeName=MIXED # Activity feed max size and max age (eg. 44640 mins = 31 days) activities.feed.max.size=100 activities.feed.max.age.mins=44640 # Subsystem unit test values. Will not have any effect on production servers subsystems.test.beanProp.default.longProperty=123456789123456789 subsystems.test.beanProp.default.anotherStringProperty=Global Default subsystems.test.beanProp=inst1,inst2,inst3 subsystems.test.beanProp.value.inst2.boolProperty=true subsystems.test.beanProp.value.inst3.anotherStringProperty=Global Instance Default subsystems.test.simpleProp2=true subsystems.test.simpleProp3=Global Default3 # Deployment Service deployment.service.numberOfSendingThreads=5 deployment.service.corePoolSize=2 deployment.service.maximumPoolSize=3 # How long to wait in mS before refreshing a target lock - detects shutdown servers deployment.service.targetLockRefreshTime=60000 # How long to wait in mS from the last communication before deciding that deployment has failed, possibly # the destination is no longer available? deployment.service.targetLockTimeout=3600000 #Invitation Service # Should send emails as part of invitation process. notification.email.siteinvite=true # Transfer Service transferservice.receiver.enabled=true transferservice.receiver.stagingDir=${java.io.tmpdir}/alfresco-transfer-staging # # How long to wait in mS before refreshing a transfer lock - detects shutdown servers # Default 1 minute. transferservice.receiver.lockRefreshTime=60000 # # How many times to attempt retry the transfer lock transferservice.receiver.lockRetryCount=3 # How long to wait, in mS, before retrying the transfer lock transferservice.receiver.lockRetryWait=100 # # How long to wait, in mS, since the last contact with from the client before # timing out a transfer. 
Needs to be long enough to cope with network delays and "thinking # time" for both source and destination. Default 5 minutes. transferservice.receiver.lockTimeOut=300000 ; DM Receiever Properties ; ; The name of the DM Receiver target - you deploy to this target name deployment.dmr.name=alfresco ; consolidate staging, author and workflow sandboxes to one deployment.dmr.consolidate=true
  • 12. ; The name of the Alfresco receiver targer deployment.avm.name=avm ;Where should the root of the web project be stored, by default /www/avm_webapps deployment.avm.rootPath=/www/avm_webapps ; Pattern for live stores deployment by the alfresco receiver deployment.avm.storeNamePattern=%storeName%-live ; Built in deployment receiver properties for the default ; filesystem receiver ; filesystem receiver configuration deployment.filesystem.rootdir=./wcm deployment.filesystem.datadir=${deployment.filesystem.rootdir}/depdata deployment.filesystem.logdir=${deployment.filesystem.rootdir}/deplog deployment.filesystem.metadatadir=${deployment.filesystem.rootdir}/depmetadata deployment.filesystem.autofix=true deployment.filesystem.errorOnOverwrite=false ; default filesystem target configuration deployment.filesystem.default.rootdir=./www deployment.filesystem.default.name=filesystem deployment.filesystem.default.metadatadir=${deployment.filesystem.metadatadir}/default