blob: f42c3c59820d9696e7be66c9237dd59e11b73973 [file] [log] [blame]
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.directory.server.core;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.io.StringReader;
import java.lang.reflect.Method;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.directory.api.ldap.codec.api.LdapApiService;
import org.apache.directory.api.ldap.codec.api.LdapApiServiceFactory;
import org.apache.directory.api.ldap.model.constants.AuthenticationLevel;
import org.apache.directory.api.ldap.model.constants.SchemaConstants;
import org.apache.directory.api.ldap.model.csn.Csn;
import org.apache.directory.api.ldap.model.csn.CsnFactory;
import org.apache.directory.api.ldap.model.cursor.Cursor;
import org.apache.directory.api.ldap.model.entry.Attribute;
import org.apache.directory.api.ldap.model.entry.DefaultEntry;
import org.apache.directory.api.ldap.model.entry.Entry;
import org.apache.directory.api.ldap.model.entry.Modification;
import org.apache.directory.api.ldap.model.entry.Value;
import org.apache.directory.api.ldap.model.exception.LdapException;
import org.apache.directory.api.ldap.model.exception.LdapNoPermissionException;
import org.apache.directory.api.ldap.model.exception.LdapOperationException;
import org.apache.directory.api.ldap.model.exception.LdapOtherException;
import org.apache.directory.api.ldap.model.ldif.ChangeType;
import org.apache.directory.api.ldap.model.ldif.LdifEntry;
import org.apache.directory.api.ldap.model.ldif.LdifReader;
import org.apache.directory.api.ldap.model.name.Dn;
import org.apache.directory.api.ldap.model.name.DnUtils;
import org.apache.directory.api.ldap.model.name.Rdn;
import org.apache.directory.api.ldap.model.schema.SchemaManager;
import org.apache.directory.api.ldap.util.tree.DnNode;
import org.apache.directory.api.util.TimeProvider;
import org.apache.directory.api.util.DateUtils;
import org.apache.directory.api.util.Strings;
import org.apache.directory.api.util.exception.NotImplementedException;
import org.apache.directory.server.constants.ApacheSchemaConstants;
import org.apache.directory.server.constants.ServerDNConstants;
import org.apache.directory.server.core.admin.AdministrativePointInterceptor;
import org.apache.directory.server.core.api.AttributeTypeProvider;
import org.apache.directory.server.core.api.CoreSession;
import org.apache.directory.server.core.api.DirectoryService;
import org.apache.directory.server.core.api.DnFactory;
import org.apache.directory.server.core.api.InstanceLayout;
import org.apache.directory.server.core.api.InterceptorEnum;
import org.apache.directory.server.core.api.LdapPrincipal;
import org.apache.directory.server.core.api.ObjectClassProvider;
import org.apache.directory.server.core.api.OperationEnum;
import org.apache.directory.server.core.api.OperationManager;
import org.apache.directory.server.core.api.ReferralManager;
import org.apache.directory.server.core.api.administrative.AccessControlAdministrativePoint;
import org.apache.directory.server.core.api.administrative.CollectiveAttributeAdministrativePoint;
import org.apache.directory.server.core.api.administrative.SubschemaAdministrativePoint;
import org.apache.directory.server.core.api.administrative.TriggerExecutionAdministrativePoint;
import org.apache.directory.server.core.api.changelog.ChangeLog;
import org.apache.directory.server.core.api.changelog.ChangeLogEvent;
import org.apache.directory.server.core.api.changelog.Tag;
import org.apache.directory.server.core.api.changelog.TaggableSearchableChangeLogStore;
import org.apache.directory.server.core.api.event.EventService;
import org.apache.directory.server.core.api.interceptor.BaseInterceptor;
import org.apache.directory.server.core.api.interceptor.Interceptor;
import org.apache.directory.server.core.api.interceptor.context.AddOperationContext;
import org.apache.directory.server.core.api.interceptor.context.BindOperationContext;
import org.apache.directory.server.core.api.interceptor.context.HasEntryOperationContext;
import org.apache.directory.server.core.api.interceptor.context.LookupOperationContext;
import org.apache.directory.server.core.api.interceptor.context.OperationContext;
import org.apache.directory.server.core.api.journal.Journal;
import org.apache.directory.server.core.api.partition.Partition;
import org.apache.directory.server.core.api.partition.PartitionNexus;
import org.apache.directory.server.core.api.partition.PartitionTxn;
import org.apache.directory.server.core.api.schema.SchemaPartition;
import org.apache.directory.server.core.api.subtree.SubentryCache;
import org.apache.directory.server.core.api.subtree.SubtreeEvaluator;
import org.apache.directory.server.core.authn.AuthenticationInterceptor;
import org.apache.directory.server.core.authn.ppolicy.PpolicyConfigContainer;
import org.apache.directory.server.core.authz.AciAuthorizationInterceptor;
import org.apache.directory.server.core.authz.DefaultAuthorizationInterceptor;
import org.apache.directory.server.core.changelog.ChangeLogInterceptor;
import org.apache.directory.server.core.changelog.DefaultChangeLog;
import org.apache.directory.server.core.collective.CollectiveAttributeInterceptor;
import org.apache.directory.server.core.event.EventInterceptor;
import org.apache.directory.server.core.exception.ExceptionInterceptor;
import org.apache.directory.server.core.journal.DefaultJournal;
import org.apache.directory.server.core.journal.JournalInterceptor;
import org.apache.directory.server.core.normalization.NormalizationInterceptor;
import org.apache.directory.server.core.operational.OperationalAttributeInterceptor;
import org.apache.directory.server.core.referral.ReferralInterceptor;
import org.apache.directory.server.core.schema.SchemaInterceptor;
import org.apache.directory.server.core.shared.DefaultCoreSession;
import org.apache.directory.server.core.shared.DefaultDnFactory;
import org.apache.directory.server.core.shared.partition.DefaultPartitionNexus;
import org.apache.directory.server.core.subtree.SubentryInterceptor;
import org.apache.directory.server.core.trigger.TriggerInterceptor;
import org.apache.directory.server.i18n.I18n;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Default implementation of {@link DirectoryService}.
*
* @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a>
*/
public class DefaultDirectoryService implements DirectoryService
{
/** The logger */
private static final Logger LOG = LoggerFactory.getLogger( DefaultDirectoryService.class );
private SchemaPartition schemaPartition;
/** A reference on the SchemaManager */
private SchemaManager schemaManager;
/** The LDAP Codec Service */
private LdapApiService ldapCodecService = LdapApiServiceFactory.getSingleton();
/** the root nexus */
private DefaultPartitionNexus partitionNexus;
/** whether or not server is started for the first time */
private boolean firstStart;
/** whether or not this instance has been shutdown */
private boolean started;
/** the change log service */
private ChangeLog changeLog;
/** the journal service */
private Journal journal;
/**
* the interface used to perform various operations on this
* DirectoryService
*/
private OperationManager operationManager = new DefaultOperationManager( this );
/** the distinguished name of the administrative user */
private Dn adminDn;
/** session used as admin for internal operations */
private CoreSession adminSession;
/** The referral manager */
private ReferralManager referralManager;
/** A flag to tell if the userPassword attribute's value must be hidden */
private boolean passwordHidden = false;
/** The service's CSN factory */
private CsnFactory csnFactory;
/** The directory instance replication ID */
private int replicaId;
/** remove me after implementation is completed */
private static final String PARTIAL_IMPL_WARNING =
"WARNING: the changelog is only partially operational and will revert\n"
+ "state without consideration of who made the original change. All reverting "
+ "changes are made by the admin user.\n Furthermore the used controls are not at "
+ "all taken into account";
/** The delay to wait between each sync on disk */
private long syncPeriodMillis;
/** The default delay to wait between sync on disk : 15 seconds */
private static final long DEFAULT_SYNC_PERIOD = 15000;
/** The default timeLimit : 100 entries */
public static final int MAX_SIZE_LIMIT_DEFAULT = 100;
/** The default timeLimit : 10 seconds */
public static final int MAX_TIME_LIMIT_DEFAULT = 10000;
/** The instance Id */
private String instanceId;
/** The server directory layout*/
private InstanceLayout instanceLayout;
/**
* A flag used to shutdown the VM when stopping the server. Useful
* when the server is standalone. If the server is embedded, we don't
* want to shutdown the VM
*/
private boolean exitVmOnShutdown = true; // allow by default
/** A flag used to indicate that a shutdown hook has been installed */
private boolean shutdownHookEnabled = true; // allow by default
/** Manage anonymous access to entries other than the RootDSE */
private boolean allowAnonymousAccess = false; // forbid by default
/** Manage the basic access control checks */
private boolean accessControlEnabled; // off by default
/** Manage the operational attributes denormalization */
private boolean denormalizeOpAttrsEnabled; // off by default
/** The list of declared interceptors */
private List<Interceptor> interceptors;
private Map<String, Interceptor> interceptorNames;
/** A lock to protect the interceptors List */
private ReadWriteLock interceptorsLock = new ReentrantReadWriteLock();
/** The read and write locks */
private Lock readLock = interceptorsLock.readLock();
private Lock writeLock = interceptorsLock.writeLock();
/** A map associating a list of interceptor to each operation */
private Map<OperationEnum, List<String>> operationInterceptors;
/** The System partition */
private Partition systemPartition;
/** The set of all declared partitions */
private Set<Partition> partitions = new HashSet<>();
/** A list of LDIF entries to inject at startup */
private List<? extends LdifEntry> testEntries = new ArrayList<>(); // List<Attributes>
/** The event service */
private EventService eventService;
/** The maximum size for an incoming PDU */
private int maxPDUSize = Integer.MAX_VALUE;
/** lock file for directory service's working directory */
private RandomAccessFile lockFile = null;
private static final String LOCK_FILE_NAME = ".dirservice.lock";
/** The AccessControl AdministrativePoint cache */
private DnNode<AccessControlAdministrativePoint> accessControlAPCache;
/** The CollectiveAttribute AdministrativePoint cache */
private DnNode<CollectiveAttributeAdministrativePoint> collectiveAttributeAPCache;
/** The Subschema AdministrativePoint cache */
private DnNode<SubschemaAdministrativePoint> subschemaAPCache;
/** The TriggerExecution AdministrativePoint cache */
private DnNode<TriggerExecutionAdministrativePoint> triggerExecutionAPCache;
/** The Dn factory */
private DnFactory dnFactory;
/** The Subentry cache */
SubentryCache subentryCache = new SubentryCache();
/** The Subtree evaluator instance */
private SubtreeEvaluator evaluator;
/** The attribute type provider */
private AttributeTypeProvider atProvider;
/** The object class provider */
private ObjectClassProvider ocProvider;
private TimeProvider timeProvider;
// ------------------------------------------------------------------------
// Constructor
// ------------------------------------------------------------------------
/**
 * Creates a new instance of the directory service.
 *
 * @throws LdapException If the instance cannot be created
 */
public DefaultDirectoryService() throws LdapException
{
    changeLog = new DefaultChangeLog();
    journal = new DefaultJournal();
    syncPeriodMillis = DEFAULT_SYNC_PERIOD;
    // replicaId still has its default value (0) at this point
    csnFactory = new CsnFactory( replicaId );
    // NOTE(review): schemaManager is still null when the evaluator is built here --
    // confirm SubtreeEvaluator tolerates a null SchemaManager at construction time
    evaluator = new SubtreeEvaluator( schemaManager );
    // Install the default interceptor chain; can be replaced later via setInterceptors()
    setDefaultInterceptorConfigurations();
    timeProvider = TimeProvider.DEFAULT;
}
// ------------------------------------------------------------------------
// C O N F I G U R A T I O N M E T H O D S
// ------------------------------------------------------------------------
/**
 * Sets the identifier used to name this directory service instance.
 *
 * @param instanceId the instance identifier
 */
public void setInstanceId( String instanceId )
{
    this.instanceId = instanceId;
}


/**
 * @return the identifier of this directory service instance
 */
public String getInstanceId()
{
    return instanceId;
}
/**
 * Gets the {@link Partition}s used by this DirectoryService.
 *
 * @return a defensive copy of the set of partitions in use
 */
public Set<? extends Partition> getPartitions()
{
    // Hand back a snapshot so callers cannot mutate the internal set
    return new HashSet<>( partitions );
}
/**
 * Sets {@link Partition}s used by this DirectoryService.
 *
 * @param partitions the partitions to be used
 */
public void setPartitions( Set<? extends Partition> partitions )
{
    // Defensive copy so later changes to the caller's set don't affect us
    Set<Partition> cloned = new HashSet<>( partitions );

    // Detect duplicate partition identifiers. Set.add() returns false when the
    // id was already present, which avoids the contains()/add() double lookup.
    Set<String> names = new HashSet<>();

    for ( Partition partition : cloned )
    {
        String id = partition.getId();

        if ( !names.add( id ) )
        {
            LOG.warn( "Encountered duplicate partition {} identifier.", id );
        }
    }

    this.partitions = cloned;
}
/**
 * Returns <tt>true</tt> if access control checks are enabled.
 *
 * @return true if access control checks are enabled, false otherwise
 */
public boolean isAccessControlEnabled()
{
    return accessControlEnabled;
}


/**
 * Sets whether to enable basic access control checks or not.
 *
 * @param accessControlEnabled true to enable access control checks, false otherwise
 */
public void setAccessControlEnabled( boolean accessControlEnabled )
{
    this.accessControlEnabled = accessControlEnabled;
}


/**
 * Returns <tt>true</tt> if anonymous access is allowed on entries besides the RootDSE.
 * If the access control subsystem is enabled then access to some entries may not be
 * allowed even when full anonymous access is enabled.
 *
 * @return true if anonymous access is allowed on entries besides the RootDSE, false
 * if anonymous access is allowed to all entries.
 */
public boolean isAllowAnonymousAccess()
{
    return allowAnonymousAccess;
}


/**
 * Sets whether to allow anonymous access to entries other than the RootDSE. If the
 * access control subsystem is enabled then access to some entries may not be allowed
 * even when full anonymous access is enabled.
 *
 * @param enableAnonymousAccess true to enable anonymous access, false to disable it
 */
public void setAllowAnonymousAccess( boolean enableAnonymousAccess )
{
    // Anonymous access is forbidden by default (see field declaration)
    this.allowAnonymousAccess = enableAnonymousAccess;
}
/**
 * Returns interceptors in the server.
 *
 * @return a copy of the list of interceptors in the server
 */
public List<Interceptor> getInterceptors()
{
    readLock.lock();

    try
    {
        // Copy under the read lock so the snapshot is consistent
        return new ArrayList<>( interceptors );
    }
    finally
    {
        readLock.unlock();
    }
}
/**
 * Returns interceptors in the server for a given operation.
 *
 * @param operation the operation whose interceptor chain is requested
 * @return a copy of the interceptor name list for the given operation
 */
public List<String> getInterceptors( OperationEnum operation )
{
    readLock.lock();

    try
    {
        // Copy under the read lock so the snapshot is consistent
        return new ArrayList<>( operationInterceptors.get( operation ) );
    }
    finally
    {
        readLock.unlock();
    }
}
/**
 * Computes, for each LDAP operation, the ordered list of interceptor names that
 * implement that operation, and stores the result in {@link #operationInterceptors}.
 */
private void initOperationsList()
{
    writeLock.lock();

    try
    {
        operationInterceptors = new ConcurrentHashMap<>();

        for ( OperationEnum operation : OperationEnum.getOperations() )
        {
            List<String> operationList = new ArrayList<>();

            // Walk the interceptors in their configured order so the
            // per-operation list preserves the chain order
            for ( Interceptor interceptor : interceptors )
            {
                gatherInterceptors( interceptor, interceptor.getClass(), operation, operationList );
            }

            operationInterceptors.put( operation, operationList );
        }
    }
    finally
    {
        writeLock.unlock();
    }
}
/**
 * Recursively checks if the given interceptor can be added to the list of interceptors for a given
 * operation and adds it to the list if it implements the respective operation.
 *
 * @param interceptor the instance of the interceptor
 * @param interceptorClz the class currently being inspected in the interceptor's hierarchy
 * @param operation type of operation
 * @param selectedInterceptorList the list of selected interceptor names
 */
private void gatherInterceptors( Interceptor interceptor, Class<?> interceptorClz, OperationEnum operation,
    List<String> selectedInterceptorList )
{
    // Stop once we reach the top of the hierarchy or the common base class
    if ( ( interceptorClz == null ) || ( interceptorClz == BaseInterceptor.class ) )
    {
        return;
    }

    // Use getDeclaredMethods() rather than getMethods() so the default
    // implementations inherited from BaseInterceptor are not picked up here
    for ( Method method : interceptorClz.getDeclaredMethods() )
    {
        Class<?>[] params = method.getParameterTypes();

        // An interceptor participates in an operation when it declares a
        // single-argument method named after the operation whose parameter
        // is an OperationContext
        boolean signatureMatches = ( params != null ) && ( params.length == 1 )
            && OperationContext.class.isAssignableFrom( params[0] );

        if ( signatureMatches && operation.getMethodName().equals( method.getName() ) )
        {
            String name = interceptor.getName();

            if ( !selectedInterceptorList.contains( name ) )
            {
                selectedInterceptorList.add( name );
            }

            break;
        }
    }

    // Continue up the superclass chain, since only declared methods were examined
    gatherInterceptors( interceptor, interceptorClz.getSuperclass(), operation, selectedInterceptorList );
}
/**
 * Add an interceptor to the list of interceptors to call for each operation,
 * registering it for every operation it declares a handler method for.
 *
 * @param interceptor the interceptor to add; it is initialized first
 * @param position the index at which to insert it, or -1 to append at the end
 * @throws LdapException if the interceptor initialization fails
 */
private void addInterceptor( Interceptor interceptor, int position ) throws LdapException
{
    // First, init the interceptor
    interceptor.init( this );

    writeLock.lock();

    try
    {
        for ( OperationEnum operation : OperationEnum.getOperations() )
        {
            List<String> operationList = operationInterceptors.get( operation );

            // Only declared methods are inspected: declaring a method named
            // after the operation means the interceptor handles it
            Method[] methods = interceptor.getClass().getDeclaredMethods();

            for ( Method method : methods )
            {
                if ( method.getName().equals( operation.getMethodName() ) )
                {
                    if ( position == -1 )
                    {
                        operationList.add( interceptor.getName() );
                    }
                    else
                    {
                        operationList.add( position, interceptor.getName() );
                    }

                    break;
                }
            }
        }

        // Register the interceptor by name and in the global ordered chain
        interceptorNames.put( interceptor.getName(), interceptor );

        if ( position == -1 )
        {
            interceptors.add( interceptor );
        }
        else
        {
            interceptors.add( position, interceptor );
        }
    }
    finally
    {
        writeLock.unlock();
    }
}
/**
 * Remove an interceptor from the list of interceptors to call for each operation,
 * de-registering it from every per-operation list and from the global chain.
 *
 * @param interceptorName the name of the interceptor to remove
 */
private void removeOperationsList( String interceptorName )
{
    // NOTE(review): an unknown name yields a null interceptor and a NPE below --
    // confirm callers only ever pass names of registered interceptors
    Interceptor interceptor = interceptorNames.get( interceptorName );

    writeLock.lock();

    try
    {
        for ( OperationEnum operation : OperationEnum.getOperations() )
        {
            List<String> operationList = operationInterceptors.get( operation );

            // Same declared-method convention as addInterceptor(): a method named
            // after the operation marks the interceptor as a handler for it
            Method[] methods = interceptor.getClass().getDeclaredMethods();

            for ( Method method : methods )
            {
                if ( method.getName().equals( operation.getMethodName() ) )
                {
                    operationList.remove( interceptor.getName() );
                    break;
                }
            }
        }

        interceptorNames.remove( interceptorName );
        interceptors.remove( interceptor );
    }
    finally
    {
        writeLock.unlock();
    }
}
/**
 * Sets the interceptors in the server.
 *
 * @param interceptors the interceptors to be used in the server.
 */
public void setInterceptors( List<Interceptor> interceptors )
{
    Map<String, Interceptor> interceptorNames = new ConcurrentHashMap<>();

    // Index every interceptor by name; putIfAbsent keeps the first definition
    // and flags any duplicate names
    for ( Interceptor interceptor : interceptors )
    {
        if ( interceptorNames.putIfAbsent( interceptor.getName(), interceptor ) != null )
        {
            LOG.warn( "Encountered duplicate definitions for {} interceptor", interceptor.getName() );
        }
    }

    this.interceptors = interceptors;
    this.interceptorNames = interceptorNames;

    // Now update the Map that connects each operation with its interceptor list
    initOperationsList();
}
/**
 * Initialize the interceptors: every configured interceptor is initialized
 * against this directory service, in chain order.
 *
 * @throws LdapException if any interceptor fails to initialize
 */
private void initInterceptors() throws LdapException
{
    for ( Interceptor interceptor : interceptors )
    {
        interceptor.init( this );
    }
}
/**
 * Returns test directory entries({@link LdifEntry}) to be loaded while
 * bootstrapping.
 *
 * @return a copy of the test entries to load during bootstrapping
 */
public List<LdifEntry> getTestEntries()
{
    // Return a snapshot so callers cannot mutate the internal list
    return new ArrayList<>( testEntries );
}
/**
 * Sets test directory entries to be loaded while bootstrapping.
 *
 * @param testEntries the test entries to load while bootstrapping
 */
public void setTestEntries( List<? extends LdifEntry> testEntries )
{
    // Store a defensive copy so later mutations of the caller's list cannot
    // change the bootstrap entries. (Previously the copy was created but the
    // caller's list was stored instead, making the clone dead code.)
    List<LdifEntry> cloned = new ArrayList<>( testEntries );
    this.testEntries = cloned;
}
/**
 * {@inheritDoc}
 */
public InstanceLayout getInstanceLayout()
{
    // The server directory layout, set via setInstanceLayout()
    return instanceLayout;
}
/**
 * {@inheritDoc}
 */
public void setInstanceLayout( InstanceLayout instanceLayout ) throws IOException
{
    this.instanceLayout = instanceLayout;

    // Create the directories if they are missing
    createDirectory( instanceLayout.getInstanceDirectory() );
    createDirectory( instanceLayout.getLogDirectory() );
    createDirectory( instanceLayout.getRunDirectory() );
    createDirectory( instanceLayout.getPartitionsDirectory() );
    createDirectory( instanceLayout.getConfDirectory() );
}


/**
 * Creates the given directory (including missing parents) if it does not exist yet.
 *
 * @param directory the directory to create
 * @throws IOException if the directory does not exist and could not be created
 */
private void createDirectory( File directory ) throws IOException
{
    if ( !directory.exists() && !directory.mkdirs() )
    {
        throw new IOException( I18n.err( I18n.ERR_112_COULD_NOT_CREATE_DIRECTORY, directory ) );
    }
}
/**
 * Sets whether a JVM shutdown hook is installed at startup.
 *
 * @param shutdownHookEnabled true to register the hook, false to skip it
 */
public void setShutdownHookEnabled( boolean shutdownHookEnabled )
{
    this.shutdownHookEnabled = shutdownHookEnabled;
}


/**
 * @return true if a JVM shutdown hook will be registered at startup
 */
public boolean isShutdownHookEnabled()
{
    return shutdownHookEnabled;
}


/**
 * Sets whether the VM should be shut down when the service stops. Useful for
 * standalone servers; undesirable when the server is embedded.
 *
 * @param exitVmOnShutdown the flag value
 */
public void setExitVmOnShutdown( boolean exitVmOnShutdown )
{
    this.exitVmOnShutdown = exitVmOnShutdown;
}


/**
 * @return true if the VM is shut down when the service stops
 */
public boolean isExitVmOnShutdown()
{
    return exitVmOnShutdown;
}


/**
 * Sets the system partition.
 *
 * @param systemPartition the system partition
 */
public void setSystemPartition( Partition systemPartition )
{
    this.systemPartition = systemPartition;
}


/**
 * @return the system partition
 */
public Partition getSystemPartition()
{
    return systemPartition;
}


/**
 * return true if the operational attributes must be normalized when returned
 */
public boolean isDenormalizeOpAttrsEnabled()
{
    return denormalizeOpAttrsEnabled;
}


/**
 * Sets whether the operational attributes are denormalized when returned
 * @param denormalizeOpAttrsEnabled The flag value
 */
public void setDenormalizeOpAttrsEnabled( boolean denormalizeOpAttrsEnabled )
{
    this.denormalizeOpAttrsEnabled = denormalizeOpAttrsEnabled;
}


/**
 * {@inheritDoc}
 */
public ChangeLog getChangeLog()
{
    return changeLog;
}


/**
 * {@inheritDoc}
 */
public Journal getJournal()
{
    return journal;
}


/**
 * {@inheritDoc}
 */
public void setChangeLog( ChangeLog changeLog )
{
    this.changeLog = changeLog;
}


/**
 * {@inheritDoc}
 */
public void setJournal( Journal journal )
{
    this.journal = journal;
}
/**
 * {@inheritDoc}
 */
public void addPartition( Partition partition ) throws LdapException
{
    // Make sure the partition shares the service's SchemaManager
    partition.setSchemaManager( schemaManager );

    // can be null when called before starting up
    if ( partitionNexus != null )
    {
        partitionNexus.addContextPartition( partition );
    }

    // Now, add the partition to the set of managed partitions
    partitions.add( partition );
}
/**
 * {@inheritDoc}
 */
public void removePartition( Partition partition ) throws LdapException
{
    // Do the backend cleanup first
    // can be null when called before starting up
    if ( partitionNexus != null )
    {
        // The nexus keys partitions by their normalized suffix Dn
        partitionNexus.removeContextPartition( partition.getSuffixDn().getNormName() );
    }

    // And update the set of managed partitions
    partitions.remove( partition );
}
// ------------------------------------------------------------------------
// BackendSubsystem Interface Method Implementations
// ------------------------------------------------------------------------
/**
 * Define a default list of interceptors that has to be used if no other
 * configuration is defined.
 */
private void setDefaultInterceptorConfigurations()
{
    // Set default interceptor chains. The order below is significant: every
    // request flows through the interceptors in exactly this sequence.
    // The list must stay mutable, as addInterceptor()/removeOperationsList()
    // modify it later.
    List<Interceptor> list = new ArrayList<>();

    list.add( new NormalizationInterceptor() );
    list.add( new AuthenticationInterceptor() );
    list.add( new ReferralInterceptor() );
    list.add( new AciAuthorizationInterceptor() );
    list.add( new DefaultAuthorizationInterceptor() );
    list.add( new AdministrativePointInterceptor() );
    list.add( new ExceptionInterceptor() );
    list.add( new SchemaInterceptor() );
    list.add( new OperationalAttributeInterceptor() );
    list.add( new CollectiveAttributeInterceptor() );
    list.add( new SubentryInterceptor() );
    list.add( new EventInterceptor() );
    list.add( new TriggerInterceptor() );
    list.add( new ChangeLogInterceptor() );
    list.add( new JournalInterceptor() );

    setInterceptors( list );
}
/**
 * @return the session used as admin for internal operations
 */
public CoreSession getAdminSession()
{
    return adminSession;
}


/**
 * Get back an anonymous session
 */
public CoreSession getSession()
{
    // A principal built with only the SchemaManager carries no Dn or
    // credentials -- the anonymous user, per this method's contract
    return new DefaultCoreSession( new LdapPrincipal( schemaManager ), this );
}


/**
 * Get back a session for a given principal
 */
public CoreSession getSession( LdapPrincipal principal )
{
    return new DefaultCoreSession( principal, this );
}
/**
 * Get back a session for the give user and credentials bound with Simple Bind
 */
public CoreSession getSession( Dn principalDn, byte[] credentials ) throws LdapException
{
    // Fail fast when the service has not been started yet
    synchronized ( this )
    {
        if ( !started )
        {
            throw new IllegalStateException( "Service has not started." );
        }
    }

    BindOperationContext bindContext = new BindOperationContext( null );
    // NOTE(review): unlike the SASL variant of getSession(), no read transaction
    // is set on the bind context here -- confirm whether
    // bindContext.setTransaction( partitionNexus.beginReadTransaction() ) is needed
    bindContext.setCredentials( credentials );

    // Normalize the Dn against the schema if the caller did not already do so
    if ( principalDn.isSchemaAware() )
    {
        bindContext.setDn( principalDn );
    }
    else
    {
        bindContext.setDn( new Dn( schemaManager, principalDn ) );
    }

    bindContext.setInterceptors( getInterceptors( OperationEnum.BIND ) );

    operationManager.bind( bindContext );

    return bindContext.getSession();
}
/**
 * Get back a session for a given user bound with SASL Bind
 */
public CoreSession getSession( Dn principalDn, byte[] credentials, String saslMechanism, String saslAuthId )
    throws LdapException
{
    // Fail fast when the service has not been started yet
    synchronized ( this )
    {
        if ( !started )
        {
            throw new IllegalStateException( "Service has not started." );
        }
    }

    BindOperationContext bindContext = new BindOperationContext( null );
    // The bind executes within a read transaction on the nexus
    bindContext.setTransaction( partitionNexus.beginReadTransaction() );
    bindContext.setCredentials( credentials );

    // Normalize the Dn against the schema if the caller did not already do so
    if ( principalDn.isSchemaAware() )
    {
        bindContext.setDn( principalDn );
    }
    else
    {
        bindContext.setDn( new Dn( schemaManager, principalDn ) );
    }

    bindContext.setSaslMechanism( saslMechanism );
    // NOTE(review): the saslAuthId parameter is never stored on the context --
    // confirm whether bindContext is supposed to carry it
    bindContext.setInterceptors( getInterceptors( OperationEnum.BIND ) );

    operationManager.bind( bindContext );

    return bindContext.getSession();
}
/**
 * Reverts the server to the revision of the latest tag, when that tag is
 * older than the current changelog revision.
 *
 * @return the current changelog revision after the operation
 * @throws LdapException if the revert fails
 */
public long revert() throws LdapException
{
    if ( ( changeLog == null ) || !changeLog.isEnabled() )
    {
        throw new IllegalStateException( I18n.err( I18n.ERR_310 ) );
    }

    Tag latest = changeLog.getLatest();

    // No tag at all: nothing we could revert to
    if ( latest == null )
    {
        throw new IllegalStateException( I18n.err( I18n.ERR_311 ) );
    }

    if ( latest.getRevision() < changeLog.getCurrentRevision() )
    {
        return revert( latest.getRevision() );
    }

    // Nothing has changed since the latest tag
    LOG.info( "Ignoring request to revert without changes since the latest tag." );

    return changeLog.getCurrentRevision();
}
/**
 * We handle the ModDN/ModRDN operation for the revert here: the pair of Dns is
 * dispatched to rename, move, or moveAndRename on the admin session.
 *
 * @param oldDn the entry's current Dn
 * @param newDn the entry's target Dn
 * @param delOldRdn whether the old Rdn is removed from the entry
 * @throws LdapException if the operation fails or oldDn is the empty Dn
 */
private void moddn( Dn oldDn, Dn newDn, boolean delOldRdn ) throws LdapException
{
    // Renaming the root Dn is not permitted
    if ( oldDn.size() == 0 )
    {
        throw new LdapNoPermissionException( I18n.err( I18n.ERR_312 ) );
    }

    Dn oldBase = oldDn.getParent();
    Dn newBase = newDn.getParent();
    Rdn oldRdn = oldDn.getRdn();
    Rdn newRdn = newDn.getRdn();

    /*
     * A rename that keeps both the Dn size and the parent is a plain Rdn
     * change; anything else is a move, possibly combined with an Rdn change
     * when the two Rdns differ.
     */
    boolean sameParent = ( oldDn.size() == newDn.size() ) && oldBase.equals( newBase );

    if ( sameParent )
    {
        adminSession.rename( oldDn, newRdn, delOldRdn );
    }
    else if ( newRdn.equals( oldRdn ) )
    {
        // Pure move: the Rdn is unchanged, only the parent differs
        adminSession.move( oldDn, newBase );
    }
    else
    {
        adminSession.moveAndRename( oldDn, newBase, newRdn, delOldRdn );
    }
}
/**
 * Reverts the directory content to the given changelog revision by applying,
 * in reverse order, the reverse LDIFs of every event recorded after it.
 *
 * @param revision the revision to revert to; must be &gt;= 0 and older than the
 *        current changelog revision
 * @return the current changelog revision after the revert
 * @throws LdapException if the changelog is disabled or the revert fails
 */
public long revert( long revision ) throws LdapException
{
    if ( changeLog == null || !changeLog.isEnabled() )
    {
        throw new IllegalStateException( I18n.err( I18n.ERR_310 ) );
    }

    if ( revision < 0 )
    {
        throw new IllegalArgumentException( I18n.err( I18n.ERR_239 ) );
    }

    if ( revision >= changeLog.getChangeLogStore().getCurrentRevision() )
    {
        throw new IllegalArgumentException( I18n.err( I18n.ERR_314 ) );
    }

    // NOTE(review): the cursor is opened before the try block; a failure in
    // beginWriteTransaction() below would leak it -- confirm
    Cursor<ChangeLogEvent> cursor = changeLog.getChangeLogStore().findAfter( revision );

    /*
     * BAD, BAD, BAD!!!
     *
     * No synchronization no nothing. Just getting this to work for now
     * so we can revert tests. Any production grade use of this feature
     * needs to synchronize on all changes while the revert is in progress.
     *
     * How about making this operation transactional?
     *
     * First of all just stop using JNDI and construct the operations to
     * feed into the interceptor pipeline.
     *
     * TODO review this code.
     */
    PartitionTxn transaction = systemPartition.beginWriteTransaction();

    // Speedup the addition by using a global transaction
    adminSession.addTransaction( systemPartition, transaction );
    adminSession.beginSessionTransaction();

    try
    {
        LOG.warn( PARTIAL_IMPL_WARNING );
        cursor.afterLast();

        while ( cursor.previous() ) // apply ldifs in reverse order
        {
            ChangeLogEvent event = cursor.get();
            List<LdifEntry> reverses = event.getReverseLdifs();

            // Each reverse LDIF undoes the corresponding forward change
            for ( LdifEntry reverse : reverses )
            {
                switch ( reverse.getChangeType().getChangeType() )
                {
                    case ChangeType.ADD_ORDINAL:
                        adminSession.add(
                            new DefaultEntry( schemaManager, reverse.getEntry() ), true );
                        break;

                    case ChangeType.DELETE_ORDINAL:
                        adminSession.delete( reverse.getDn(), true );
                        break;

                    case ChangeType.MODIFY_ORDINAL:
                        List<Modification> mods = reverse.getModifications();

                        adminSession.modify( reverse.getDn(), mods, true );
                        break;

                    case ChangeType.MODDN_ORDINAL:
                        // NO BREAK - both ModDN and ModRDN handling is the same
                    case ChangeType.MODRDN_ORDINAL:
                        Dn forwardDn = event.getForwardLdif().getDn();
                        Dn reverseDn = reverse.getDn();

                        moddn( reverseDn, forwardDn, reverse.isDeleteOldRdn() );
                        break;

                    default:
                        LOG.error( I18n.err( I18n.ERR_75 ) );
                        throw new NotImplementedException( I18n.err( I18n.ERR_76, reverse.getChangeType() ) );
                }
            }

            // NOTE(review): the session transaction is committed inside the loop,
            // once per reverted event -- confirm the per-event commit is intended
            adminSession.endSessionTransaction( true );
        }
    }
    catch ( Exception e )
    {
        try
        {
            // Roll back the session transaction on any failure
            adminSession.endSessionTransaction( false );
        }
        catch ( IOException ioe )
        {
            throw new LdapOperationException( ioe.getMessage(), ioe );
        }

        throw new LdapOperationException( e.getMessage(), e );
    }
    finally
    {
        try
        {
            cursor.close();
        }
        catch ( Exception e )
        {
            throw new LdapOperationException( e.getMessage(), e );
        }
    }

    return changeLog.getCurrentRevision();
}
/**
 * @return the OperationManager driving operations through the interceptor chain
 */
public OperationManager getOperationManager()
{
    return this.operationManager;
}
/**
 * Starts the directory service : locks the working directory, optionally
 * registers a JVM shutdown hook, initializes the whole service, prints
 * security warnings and finally loads the configured test entries.
 *
 * @throws LdapException if the LDAP server cannot be started
 */
public synchronized void startup() throws LdapException
{
    if ( started )
    {
        // Idempotent : calling startup() twice is a no-op
        return;
    }

    // Fail fast if another directory service instance owns the working directory
    lockWorkDir();

    if ( shutdownHookEnabled )
    {
        Runtime.getRuntime().addShutdownHook( new Thread( new Runnable()
        {
            public void run()
            {
                try
                {
                    shutdown();
                }
                catch ( Exception e )
                {
                    LOG.warn( "Failed to shut down the directory service: "
                        + DefaultDirectoryService.this.instanceId, e );
                }
            }
        }, "ApacheDS Shutdown Hook (" + instanceId + ')' ) );

        LOG.info( "ApacheDS shutdown hook has been registered with the runtime." );
    }
    else if ( LOG.isWarnEnabled() )
    {
        // Fixed typo in the warning message : "overriden" -> "overridden"
        LOG.warn( "ApacheDS shutdown hook has NOT been registered with the runtime."
            + " This default setting for standalone operation has been overridden." );
    }

    initialize();
    showSecurityWarnings();

    // Only flag the service as started once initialization fully succeeded
    started = true;

    if ( !testEntries.isEmpty() )
    {
        createTestEntries();
    }
}
/**
 * Flushes the pending changelog and partition modifications down to the
 * underlying storage. Does nothing if the service has not been started.
 *
 * @throws LdapException if the synchronization fails
 */
public synchronized void sync() throws LdapException
{
    if ( !started )
    {
        return;
    }

    changeLog.sync();
    partitionNexus.sync();
}
/**
 * Shuts the directory service down : flushes and destroys the nexus, the
 * changelog, the journal and every interceptor, then releases the working
 * directory lock. Does nothing if the service is not started.
 *
 * @throws LdapException if any component fails to shut down cleanly
 */
public synchronized void shutdown() throws LdapException
{
    LOG.debug( "+++ DirectoryService Shutdown required" );

    if ( !started )
    {
        return;
    }

    // --------------------------------------------------------------------
    // Shutdown the sync thread
    // --------------------------------------------------------------------
    LOG.debug( "--- Syncing the nexus " );
    LOG.debug( "--- Flushing everything before quitting" );

    // Hold the global write lock while flushing so no operation can modify
    // the partitions mid-sync. Release it in a finally block : the original
    // code leaked the lock forever if sync() threw.
    operationManager.lockWrite();

    try
    {
        partitionNexus.sync();
    }
    finally
    {
        operationManager.unlockWrite();
    }

    // --------------------------------------------------------------------
    // Shutdown the changelog
    // --------------------------------------------------------------------
    LOG.debug( "--- Syncing the changeLog " );
    changeLog.sync();
    changeLog.destroy();

    // --------------------------------------------------------------------
    // Shutdown the journal if enabled
    // --------------------------------------------------------------------
    if ( journal.isEnabled() )
    {
        LOG.debug( "--- Destroying the journal " );
        journal.destroy();
    }

    // --------------------------------------------------------------------
    // Shutdown the partition
    // --------------------------------------------------------------------
    LOG.debug( "--- Destroying the nexus" );
    partitionNexus.destroy( null );

    // --------------------------------------------------------------------
    // Shutdown the interceptors
    // --------------------------------------------------------------------
    LOG.debug( "--- Destroying the interceptors" );

    for ( Interceptor interceptor : interceptors )
    {
        interceptor.destroy();
    }

    // --------------------------------------------------------------------
    // And shutdown the server
    // --------------------------------------------------------------------
    LOG.debug( "---Deleting the DnCache" );
    dnFactory = null;

    if ( lockFile != null )
    {
        try
        {
            // Closing the file releases the FileLock; no need to delete the lock file
            lockFile.close();
        }
        catch ( IOException e )
        {
            // Fixed log message : we failed to close (not delete) the lock file
            LOG.warn( "couldn't close the lock file {}", LOCK_FILE_NAME );
        }
    }

    LOG.debug( "+++ DirectoryService stopped" );
    started = false;
}
/**
 * @return The referral manager
 */
public ReferralManager getReferralManager()
{
    return this.referralManager;
}
/**
 * Sets the referral manager used by the service.
 *
 * @param referralManager The initialized referralManager
 */
public void setReferralManager( ReferralManager referralManager )
{
    this.referralManager = referralManager;
}
/**
 * @return the SchemaManager attached to this service
 */
public SchemaManager getSchemaManager()
{
    return this.schemaManager;
}
/**
 * Injects the SchemaManager instance.
 *
 * @param schemaManager The server schemaManager
 */
public void setSchemaManager( SchemaManager schemaManager )
{
    this.schemaManager = schemaManager;
}
/**
 * @return the LDAP codec service used to encode/decode PDUs
 */
public LdapApiService getLdapCodecService()
{
    return this.ldapCodecService;
}
/**
 * {@inheritDoc}
 */
public SchemaPartition getSchemaPartition()
{
    return this.schemaPartition;
}
/**
 * {@inheritDoc}
 */
public void setSchemaPartition( SchemaPartition schemaPartition )
{
    this.schemaPartition = schemaPartition;
}
/**
 * @return the partition nexus routing operations to the right partition
 */
public DefaultPartitionNexus getPartitionNexus()
{
    return this.partitionNexus;
}
/**
 * @return true when the bootstrap entries had to be created at startup
 */
public boolean isFirstStart()
{
    return this.firstStart;
}
/**
 * @return true when the service has been started and not yet shut down
 */
public synchronized boolean isStarted()
{
    return this.started;
}
/**
 * Creates an empty schema-aware entry for the given Dn.
 *
 * @param dn the Dn of the new entry
 * @return a new, empty DefaultEntry
 */
public Entry newEntry( Dn dn )
{
    return new DefaultEntry( this.schemaManager, dn );
}
/**
 * Add a new entry into the server through the nexus, wrapping the add in
 * a partition write transaction that is committed on success and aborted
 * on failure.
 *
 * @param serverEntry the entry to add
 * @throws LdapException if the add or the transaction handling fails
 */
private void addEntry( Entry serverEntry ) throws LdapException
{
    Partition partition = partitionNexus.getPartition( serverEntry.getDn() );
    AddOperationContext addContext = new AddOperationContext( adminSession, serverEntry );
    PartitionTxn partitionTxn = null;

    try
    {
        partitionTxn = partition.beginWriteTransaction();
        addContext.setTransaction( partitionTxn );
        addContext.setPartition( partition );

        partitionNexus.add( addContext );
        partitionTxn.commit();
    }
    catch ( LdapException le )
    {
        // Guard against NPE : beginWriteTransaction() itself may have failed,
        // in which case there is no transaction to abort
        if ( partitionTxn != null )
        {
            try
            {
                partitionTxn.abort();
            }
            catch ( IOException ioe )
            {
                throw new LdapOtherException( ioe.getMessage(), ioe );
            }
        }

        throw le;
    }
    catch ( IOException ioe )
    {
        if ( partitionTxn != null )
        {
            try
            {
                partitionTxn.abort();
            }
            catch ( IOException ioe2 )
            {
                throw new LdapOtherException( ioe2.getMessage(), ioe2 );
            }
        }

        throw new LdapOtherException( ioe.getMessage(), ioe );
    }
}
/**
 * Returns true if we had to create the bootstrap entries on the first
 * start of the server. Otherwise if all entries exist, meaning none
 * had to be created, then we are not starting for the first time.
 *
 * @return true if the bootstrap entries had to be created, false otherwise
 * @throws LdapException if entries cannot be created
 * @throws IOException if a partition transaction cannot be closed
 */
private boolean createBootstrapEntries() throws LdapException, IOException
{
    boolean firstStart = false;

    // -------------------------------------------------------------------
    // create admin entry : if it is there, the database was already created
    // -------------------------------------------------------------------
    if ( !bootstrapEntryExists( adminDn ) )
    {
        firstStart = true;

        Entry serverEntry = new DefaultEntry( schemaManager, adminDn );

        serverEntry.put( SchemaConstants.OBJECT_CLASS_AT,
            SchemaConstants.TOP_OC,
            SchemaConstants.PERSON_OC,
            SchemaConstants.ORGANIZATIONAL_PERSON_OC,
            SchemaConstants.INET_ORG_PERSON_OC );

        serverEntry.put( SchemaConstants.UID_AT, PartitionNexus.ADMIN_UID );
        serverEntry.put( SchemaConstants.USER_PASSWORD_AT, PartitionNexus.ADMIN_PASSWORD_BYTES );
        // The original code put displayName twice; a single put is equivalent
        serverEntry.put( SchemaConstants.DISPLAY_NAME_AT, "Directory Superuser" );
        serverEntry.put( SchemaConstants.CN_AT, "system administrator" );
        serverEntry.put( SchemaConstants.SN_AT, "administrator" );

        addBootstrapOperationalAttributes( serverEntry );
        addEntry( serverEntry );
    }

    // -------------------------------------------------------------------
    // create system users area
    // -------------------------------------------------------------------
    Dn userDn = getDnFactory().create( ServerDNConstants.USERS_SYSTEM_DN );

    if ( !bootstrapEntryExists( userDn ) )
    {
        firstStart = true;
        addEntry( createBootstrapOu( userDn, "users" ) );
    }

    // -------------------------------------------------------------------
    // create system groups area
    // -------------------------------------------------------------------
    Dn groupDn = getDnFactory().create( ServerDNConstants.GROUPS_SYSTEM_DN );

    if ( !bootstrapEntryExists( groupDn ) )
    {
        firstStart = true;
        addEntry( createBootstrapOu( groupDn, "groups" ) );
    }

    // -------------------------------------------------------------------
    // create administrator group
    // -------------------------------------------------------------------
    Dn name = getDnFactory().create( ServerDNConstants.ADMINISTRATORS_GROUP_DN );

    if ( !bootstrapEntryExists( name ) )
    {
        firstStart = true;

        Entry serverEntry = new DefaultEntry( schemaManager, name );

        serverEntry.put( SchemaConstants.OBJECT_CLASS_AT,
            SchemaConstants.TOP_OC,
            SchemaConstants.GROUP_OF_UNIQUE_NAMES_OC );
        serverEntry.put( SchemaConstants.CN_AT, "Administrators" );
        serverEntry.put( SchemaConstants.UNIQUE_MEMBER_AT, ServerDNConstants.ADMIN_SYSTEM_DN_NORMALIZED );

        addBootstrapOperationalAttributes( serverEntry );
        addEntry( serverEntry );
    }

    // -------------------------------------------------------------------
    // create system configuration area
    // -------------------------------------------------------------------
    Dn configurationDn = getDnFactory().create( "ou=configuration,ou=system" );

    if ( !bootstrapEntryExists( configurationDn ) )
    {
        firstStart = true;
        addEntry( createBootstrapOu( configurationDn, "configuration" ) );
    }

    // -------------------------------------------------------------------
    // create system configuration area for partition information
    // -------------------------------------------------------------------
    Dn partitionsDn = getDnFactory().create( "ou=partitions,ou=configuration,ou=system" );

    if ( !bootstrapEntryExists( partitionsDn ) )
    {
        firstStart = true;
        addEntry( createBootstrapOu( partitionsDn, "partitions" ) );
    }

    // -------------------------------------------------------------------
    // create system configuration area for services
    // -------------------------------------------------------------------
    Dn servicesDn = getDnFactory().create( "ou=services,ou=configuration,ou=system" );

    if ( !bootstrapEntryExists( servicesDn ) )
    {
        firstStart = true;
        addEntry( createBootstrapOu( servicesDn, "services" ) );
    }

    // -------------------------------------------------------------------
    // create system configuration area for interceptors
    // -------------------------------------------------------------------
    Dn interceptorsDn = getDnFactory().create( "ou=interceptors,ou=configuration,ou=system" );

    if ( !bootstrapEntryExists( interceptorsDn ) )
    {
        firstStart = true;
        addEntry( createBootstrapOu( interceptorsDn, "interceptors" ) );
    }

    // -------------------------------------------------------------------
    // create system preferences area
    // -------------------------------------------------------------------
    Dn sysPrefRootDn = getDnFactory().create( ServerDNConstants.SYSPREFROOT_SYSTEM_DN );

    if ( !bootstrapEntryExists( sysPrefRootDn ) )
    {
        firstStart = true;

        Entry serverEntry = new DefaultEntry( schemaManager, sysPrefRootDn );

        serverEntry.put( SchemaConstants.OBJECT_CLASS_AT,
            SchemaConstants.TOP_OC,
            SchemaConstants.ORGANIZATIONAL_UNIT_OC,
            SchemaConstants.EXTENSIBLE_OBJECT_OC );
        serverEntry.put( "prefNodeName", "sysPrefRoot" );

        addBootstrapOperationalAttributes( serverEntry );
        addEntry( serverEntry );
    }

    return firstStart;
}


/**
 * Tells whether an entry already exists, using a read transaction on the
 * partition owning the given Dn.
 *
 * @param dn the Dn to check
 * @return true if the entry exists
 * @throws LdapException if the lookup fails
 * @throws IOException if the read transaction cannot be closed
 */
private boolean bootstrapEntryExists( Dn dn ) throws LdapException, IOException
{
    Partition partition = partitionNexus.getPartition( dn );

    try ( PartitionTxn partitionTxn = partition.beginReadTransaction() )
    {
        HasEntryOperationContext hasEntryContext = new HasEntryOperationContext( adminSession, dn );
        hasEntryContext.setPartition( partition );
        hasEntryContext.setTransaction( partitionTxn );

        return partitionNexus.hasEntry( hasEntryContext );
    }
}


/**
 * Builds a standard bootstrap organizationalUnit entry with the common
 * operational attributes already set.
 *
 * @param dn the Dn of the OU entry
 * @param ou the value of the ou attribute
 * @return the new entry, ready to be added
 * @throws LdapException if an attribute cannot be set
 */
private Entry createBootstrapOu( Dn dn, String ou ) throws LdapException
{
    Entry serverEntry = new DefaultEntry( schemaManager, dn );

    serverEntry.put( SchemaConstants.OBJECT_CLASS_AT,
        SchemaConstants.TOP_OC,
        SchemaConstants.ORGANIZATIONAL_UNIT_OC );
    serverEntry.put( SchemaConstants.OU_AT, ou );

    addBootstrapOperationalAttributes( serverEntry );

    return serverEntry;
}


/**
 * Adds the operational attributes common to every bootstrap entry :
 * creatorsName, createTimestamp, entryCSN and entryUUID.
 *
 * @param serverEntry the entry to stamp
 * @throws LdapException if an attribute cannot be set
 */
private void addBootstrapOperationalAttributes( Entry serverEntry ) throws LdapException
{
    serverEntry.put( SchemaConstants.CREATORS_NAME_AT, ServerDNConstants.ADMIN_SYSTEM_DN_NORMALIZED );
    serverEntry.put( SchemaConstants.CREATE_TIMESTAMP_AT, DateUtils.getGeneralizedTime( getTimeProvider() ) );
    serverEntry.add( SchemaConstants.ENTRY_CSN_AT, getCSN().toString() );
    serverEntry.add( SchemaConstants.ENTRY_UUID_AT, UUID.randomUUID().toString() );
}
/**
 * Displays security warning messages if any possible security issue is found.
 * Currently warns when the admin entry still carries the default password.
 *
 * @throws LdapException if there are failures parsing and accessing internal structures
 */
// made protected as per the request in DIRSERVER-1920
protected void showSecurityWarnings() throws LdapException
{
    // Warn if the default password is not changed.
    Dn admin = getDnFactory().create( ServerDNConstants.ADMIN_SYSTEM_DN );
    Partition partition = partitionNexus.getPartition( admin );
    LookupOperationContext lookupContext = new LookupOperationContext( adminSession, admin );
    lookupContext.setPartition( partition );
    Entry adminEntry;

    try ( PartitionTxn partitionTxn = partition.beginReadTransaction() )
    {
        lookupContext.setTransaction( partitionTxn );
        adminEntry = partitionNexus.lookup( lookupContext );
    }
    catch ( IOException ioe )
    {
        throw new LdapOtherException( ioe.getMessage(), ioe );
    }

    // Guard against NPE : the admin entry may have no userPassword attribute
    // (e.g. when the password was removed instead of changed)
    Attribute userPasswordAt = adminEntry.get( SchemaConstants.USER_PASSWORD_AT );

    if ( userPasswordAt == null )
    {
        return;
    }

    Value userPassword = userPasswordAt.get();

    // MessageDigest.isEqual gives a constant-time comparison
    boolean needToChangeAdminPassword =
        MessageDigest.isEqual( PartitionNexus.ADMIN_PASSWORD_BYTES, userPassword.getBytes() );

    if ( needToChangeAdminPassword )
    {
        LOG.warn( "You didn't change the admin password of directory service instance '{}'. "
            + "Please update the admin password as soon as possible to prevent a possible security breach.", instanceId );
    }
}
/**
 * Adds test entries into the core.
 *
 * TODO this may no longer be needed when JNDI is not used for bootstrapping
 *
 * @throws LdapException if the creation of test entries fails.
 */
private void createTestEntries() throws LdapException
{
    for ( LdifEntry testEntry : testEntries )
    {
        try
        {
            // Clone so the stored template is never mutated
            LdifEntry ldifEntry = testEntry.clone();
            Entry entry = ldifEntry.getEntry();
            String dn = ldifEntry.getDn().getName();

            try
            {
                getAdminSession().add( new DefaultEntry( schemaManager, entry ) );
            }
            catch ( Exception e )
            {
                // Best effort : an already existing entry is not fatal.
                // Use parameterized SLF4J logging instead of string concatenation.
                LOG.warn( "{} test entry already exists.", dn, e );
            }
        }
        catch ( CloneNotSupportedException cnse )
        {
            LOG.warn( "Cannot clone the entry ", cnse );
        }
    }
}
/**
 * Creates the root context entry of the system partition (ou=system) if it
 * does not exist yet, inside a dedicated write transaction.
 *
 * @throws LdapException if the entry cannot be added
 * @throws IOException if a transaction cannot be closed
 */
private void initializeSystemPartition() throws LdapException, IOException
{
    Partition system = getSystemPartition();

    // Add root context entry for system partition
    Dn systemSuffixDn = getDnFactory().create( ServerDNConstants.SYSTEM_DN );
    CoreSession admin = getAdminSession();
    HasEntryOperationContext hasEntryContext = new HasEntryOperationContext( admin, systemSuffixDn );
    Partition partition = getPartitionNexus().getPartition( systemSuffixDn );
    hasEntryContext.setPartition( partition );

    try ( PartitionTxn partitionTxn = partition.beginReadTransaction() )
    {
        hasEntryContext.setTransaction( partitionTxn );

        if ( !system.hasEntry( hasEntryContext ) )
        {
            Entry systemEntry = new DefaultEntry( schemaManager, systemSuffixDn );

            // Add the ObjectClasses
            systemEntry.put( SchemaConstants.OBJECT_CLASS_AT, SchemaConstants.TOP_OC,
                SchemaConstants.ORGANIZATIONAL_UNIT_OC, SchemaConstants.EXTENSIBLE_OBJECT_OC );

            // Add some operational attributes
            systemEntry.put( SchemaConstants.CREATORS_NAME_AT, ServerDNConstants.ADMIN_SYSTEM_DN );
            systemEntry.put( SchemaConstants.CREATE_TIMESTAMP_AT, DateUtils.getGeneralizedTime( getTimeProvider() ) );
            systemEntry.add( SchemaConstants.ENTRY_CSN_AT, getCSN().toString() );
            systemEntry.add( SchemaConstants.ENTRY_UUID_AT, UUID.randomUUID().toString() );
            systemEntry.put( DnUtils.getRdnAttributeType( ServerDNConstants.SYSTEM_DN ), DnUtils
                .getRdnValue( ServerDNConstants.SYSTEM_DN ) );

            AddOperationContext addOperationContext = new AddOperationContext( admin, systemEntry );
            addOperationContext.setPartition( partition );
            PartitionTxn writeTxn = null;

            try
            {
                writeTxn = partition.beginWriteTransaction();
                addOperationContext.setTransaction( writeTxn );

                system.add( addOperationContext );
                writeTxn.commit();
            }
            catch ( LdapException le )
            {
                // Guard against NPE : beginWriteTransaction() may itself have
                // failed, leaving writeTxn null
                if ( writeTxn != null )
                {
                    try
                    {
                        writeTxn.abort();
                    }
                    catch ( IOException ioe )
                    {
                        throw new LdapOtherException( ioe.getMessage(), ioe );
                    }
                }

                throw le;
            }
            catch ( IOException ioe )
            {
                if ( writeTxn != null )
                {
                    try
                    {
                        writeTxn.abort();
                    }
                    catch ( IOException ioe2 )
                    {
                        throw new LdapOtherException( ioe2.getMessage(), ioe2 );
                    }
                }

                throw new LdapOtherException( ioe.getMessage(), ioe );
            }
        }
    }
}
/**
 * Kicks off the initialization of the entire system.
 *
 * The ordering below matters : the schema partition must be initialized
 * before the nexus, the nexus before the system partition bootstrap, and
 * the bootstrap entries before the interceptor chain.
 *
 * @throws LdapException if there are problems along the way
 */
private void initialize() throws LdapException
{
    if ( LOG.isDebugEnabled() )
    {
        LOG.debug( "---> Initializing the DefaultDirectoryService " );
    }
    // The CSN factory must know this server's replicaId before any CSN is generated
    csnFactory.setReplicaId( replicaId );
    // If no interceptor list is defined, setup a default list
    if ( interceptors == null )
    {
        setDefaultInterceptorConfigurations();
    }
    // Initialize the AP caches
    accessControlAPCache = new DnNode<>();
    collectiveAttributeAPCache = new DnNode<>();
    subschemaAPCache = new DnNode<>();
    triggerExecutionAPCache = new DnNode<>();
    if ( dnFactory == null )
    {
        // Default Dn cache of 10000 entries -- NOTE(review): confirm this size is still appropriate
        dnFactory = new DefaultDnFactory( schemaManager, 10000 );
    }
    // triggers partition to load schema fully from schema partition
    schemaPartition.initialize();
    partitions.add( schemaPartition );
    if ( !systemPartition.getSuffixDn().isSchemaAware() )
    {
        // Rebuild the suffix Dn against the schema so later comparisons are normalized
        systemPartition.setSuffixDn( new Dn( schemaManager, systemPartition.getSuffixDn() ) );
    }
    // Create the privileged admin session used internally for bootstrap operations
    adminDn = getDnFactory().create( ServerDNConstants.ADMIN_SYSTEM_DN );
    adminSession = new DefaultCoreSession( new LdapPrincipal( schemaManager, adminDn, AuthenticationLevel.STRONG ),
        this );
    // TODO - NOTE: Need to find a way to instantiate without dependency on DPN
    partitionNexus = new DefaultPartitionNexus( new DefaultEntry( schemaManager, Dn.ROOT_DSE ) );
    partitionNexus.setDirectoryService( this );
    partitionNexus.initialize();
    // Create the ou=system context entry if this is a fresh install
    try
    {
        initializeSystemPartition();
    }
    catch ( IOException ioe )
    {
        throw new LdapException( ioe.getMessage(), ioe );
    }
    // --------------------------------------------------------------------
    // Create all the bootstrap entries before initializing chain
    // --------------------------------------------------------------------
    try
    {
        firstStart = createBootstrapEntries();
    }
    catch ( IOException ioe )
    {
        throw new LdapException( ioe.getMessage(), ioe );
    }
    // initialize schema providers
    atProvider = new AttributeTypeProvider( schemaManager );
    ocProvider = new ObjectClassProvider( schemaManager );
    // Initialize the interceptors
    initInterceptors();
    // --------------------------------------------------------------------
    // Initialize the changeLog if it's enabled
    // --------------------------------------------------------------------
    if ( changeLog.isEnabled() )
    {
        changeLog.init( this );
        if ( changeLog.isExposed() && changeLog.isTagSearchSupported() )
        {
            // Advertise the changelog suffix in the rootDSE so clients can find it
            String clSuffix = ( ( TaggableSearchableChangeLogStore ) changeLog.getChangeLogStore() ).getPartition()
                .getSuffixDn().getName();
            partitionNexus.getRootDse( null ).add( ApacheSchemaConstants.CHANGELOG_CONTEXT_AT, clSuffix );
        }
    }
    // --------------------------------------------------------------------
    // Initialize the journal if it's enabled
    // --------------------------------------------------------------------
    if ( journal.isEnabled() )
    {
        journal.init( this );
    }
    if ( LOG.isDebugEnabled() )
    {
        LOG.debug( "<--- DefaultDirectoryService initialized" );
    }
}
/**
 * Read an entry (without Dn) from an LDIF attribute fragment : each
 * non-blank line is parsed as "attribute: value" and merged into the
 * resulting entry. Lines that cannot be merged are silently skipped,
 * matching the previous best-effort behavior.
 *
 * @param text The ldif format file
 * @return An entry (possibly empty, never null).
 */
private Entry readEntry( String text )
{
    Entry entry = new DefaultEntry();

    // try-with-resources : the reader is now closed deterministically
    try ( BufferedReader in = new BufferedReader( new StringReader( text ) ) )
    {
        String line;

        while ( ( line = in.readLine() ) != null )
        {
            String addedLine = line.trim();

            // Skip blank lines (covers the old length()==0 check too)
            if ( Strings.isEmpty( addedLine ) )
            {
                continue;
            }

            Attribute attribute = LdifReader.parseAttributeValue( addedLine );
            Attribute oldAttribute = entry.get( attribute.getId() );

            try
            {
                if ( oldAttribute != null )
                {
                    // Merge the new value into the existing attribute
                    oldAttribute.add( attribute.get() );
                    entry.put( oldAttribute );
                }
                else
                {
                    entry.put( attribute );
                }
            }
            catch ( LdapException ne )
            {
                // Best effort : ignore attributes that cannot be stored
            }
        }
    }
    catch ( IOException ioe )
    {
        // Cannot happen when reading from an in-memory string
    }

    return entry;
}
/**
 * Create a new Entry from an LDIF attribute fragment and a separate Dn.
 *
 * @param ldif The String representing the attributes, as a LDIF file
 * @param dn The Dn for this new entry
 * @return the new schema-aware entry, or null if the LDIF or Dn is invalid
 */
public Entry newEntry( String ldif, String dn )
{
    try
    {
        // Parse the attributes first, then attach the Dn
        Entry template = readEntry( ldif );
        template.setDn( getDnFactory().create( dn ) );

        // Make the result schema aware
        return new DefaultEntry( schemaManager, template );
    }
    catch ( Exception e )
    {
        // Best effort : signal the failure in the log and return null
        LOG.error( I18n.err( I18n.ERR_78, ldif, dn ) );

        return null;
    }
}
/**
 * @return the event service used for persistent-search style notifications
 */
public EventService getEventService()
{
    return this.eventService;
}
/**
 * Sets the event service.
 *
 * @param eventService the event service to use
 */
public void setEventService( EventService eventService )
{
    this.eventService = eventService;
}
/**
 * {@inheritDoc}
 */
public boolean isPasswordHidden()
{
    return this.passwordHidden;
}
/**
 * {@inheritDoc}
 */
public void setPasswordHidden( boolean passwordHidden )
{
    this.passwordHidden = passwordHidden;
}
/**
 * @return The maximum allowed size for an incoming PDU
 */
public int getMaxPDUSize()
{
    return this.maxPDUSize;
}
/**
 * Set the maximum allowed size for an incoming PDU.
 *
 * @param maxPDUSize A positive number of bytes for the PDU. A negative or
 * null value will be transformed to {@link Integer#MAX_VALUE}
 */
public void setMaxPDUSize( int maxPDUSize )
{
    // Normalize non-positive values to "unlimited"
    this.maxPDUSize = ( maxPDUSize <= 0 ) ? Integer.MAX_VALUE : maxPDUSize;
}
/**
 * {@inheritDoc}
 */
public Interceptor getInterceptor( String interceptorName )
{
    return this.interceptorNames.get( interceptorName );
}
/**
 * {@inheritDoc}
 */
public void addFirst( Interceptor interceptor ) throws LdapException
{
    // Position 0 means the head of the chain
    addInterceptor( interceptor, 0 );
}
/**
 * {@inheritDoc}
 */
public void addLast( Interceptor interceptor ) throws LdapException
{
    // Position -1 means the tail of the chain
    addInterceptor( interceptor, -1 );
}
/**
 * {@inheritDoc}
 *
 * Inserts the given interceptor immediately AFTER the named one; if the
 * name is not found, the interceptor is appended at the end of the chain.
 * (The previous implementation inserted it BEFORE the named interceptor,
 * contradicting this method's contract.)
 */
public void addAfter( String interceptorName, Interceptor interceptor )
{
    writeLock.lock();

    try
    {
        int position = 0;
        boolean found = false;

        // Find the named interceptor; 'position' ends up pointing
        // just after it when found
        for ( Interceptor inter : interceptors )
        {
            position++;

            if ( interceptorName.equals( inter.getName() ) )
            {
                found = true;
                break;
            }
        }

        if ( found )
        {
            // Insert right after the named interceptor
            interceptors.add( position, interceptor );
        }
        else
        {
            // Name not found : append at the end of the chain
            interceptors.add( interceptor );
        }
    }
    finally
    {
        writeLock.unlock();
    }
}
/**
 * {@inheritDoc}
 */
public void remove( String interceptorName )
{
    this.removeOperationsList( interceptorName );
}
/**
 * Get a new CSN.
 *
 * @return The CSN generated for this directory service
 */
public Csn getCSN()
{
    return this.csnFactory.newInstance();
}
/**
 * @return the replicaId
 */
public int getReplicaId()
{
    return this.replicaId;
}
/**
 * Sets the replicaId used when generating CSNs. Values outside the valid
 * [0, 999] range are rejected with an error log and replaced by 0.
 *
 * @param replicaId the replicaId to set
 */
public void setReplicaId( int replicaId )
{
    boolean valid = ( replicaId >= 0 ) && ( replicaId <= 999 );

    if ( !valid )
    {
        LOG.error( I18n.err( I18n.ERR_79 ) );
    }

    this.replicaId = valid ? replicaId : 0;
}
/**
 * {@inheritDoc}
 */
public long getSyncPeriodMillis()
{
    return this.syncPeriodMillis;
}
/**
 * {@inheritDoc}
 */
public void setSyncPeriodMillis( long syncPeriodMillis )
{
    this.syncPeriodMillis = syncPeriodMillis;
}
/**
 * Checks if the working directory is already in use by some other directory
 * service; if yes, throws a runtime exception, otherwise obtains an
 * exclusive file lock on the working directory.
 *
 * The lock file is kept open for the lifetime of the service; closing it
 * in shutdown() releases the lock.
 */
private void lockWorkDir()
{
    FileLock fileLock = null;

    try
    {
        lockFile = new RandomAccessFile( new File( instanceLayout.getInstanceDirectory(), LOCK_FILE_NAME ), "rw" );

        try
        {
            // Exclusive lock on the first byte of the lock file
            fileLock = lockFile.getChannel().tryLock( 0, 1, false );
        }
        catch ( IOException e )
        {
            // shouldn't happen, but log
            LOG.error( "failed to lock the work directory", e );
        }
        catch ( OverlappingFileLockException e ) // thrown if we can't get a lock
        {
            fileLock = null;
        }
    }
    catch ( FileNotFoundException e )
    {
        // shouldn't happen, but log anyway
        LOG.error( "failed to lock the work directory", e );
    }

    if ( ( fileLock == null ) || !fileLock.isValid() )
    {
        // Fixed diagnostic : report the instance directory where the lock
        // file actually lives, not the run directory
        String message = "the working directory " + instanceLayout.getInstanceDirectory()
            + " has been locked by another directory service.";
        LOG.error( message );
        throw new RuntimeException( message );
    }
}
/**
 * {@inheritDoc}
 */
public DnNode<AccessControlAdministrativePoint> getAccessControlAPCache()
{
    return this.accessControlAPCache;
}
/**
 * {@inheritDoc}
 */
public DnNode<CollectiveAttributeAdministrativePoint> getCollectiveAttributeAPCache()
{
    return this.collectiveAttributeAPCache;
}
/**
 * {@inheritDoc}
 */
public DnNode<SubschemaAdministrativePoint> getSubschemaAPCache()
{
    return this.subschemaAPCache;
}
/**
 * {@inheritDoc}
 */
public DnNode<TriggerExecutionAdministrativePoint> getTriggerExecutionAPCache()
{
    return this.triggerExecutionAPCache;
}
/**
 * {@inheritDoc}
 */
public boolean isPwdPolicyEnabled()
{
    AuthenticationInterceptor authenticationInterceptor =
        ( AuthenticationInterceptor ) getInterceptor( InterceptorEnum.AUTHENTICATION_INTERCEPTOR.getName() );

    // No authentication interceptor means no password policy at all
    if ( authenticationInterceptor == null )
    {
        return false;
    }

    PpolicyConfigContainer pwdPolicyContainer = authenticationInterceptor.getPwdPolicyContainer();

    if ( pwdPolicyContainer == null )
    {
        return false;
    }

    // Enabled when either a default policy or at least one custom policy exists
    return ( pwdPolicyContainer.getDefaultPolicy() != null ) || pwdPolicyContainer.hasCustomConfigs();
}
/**
 * {@inheritDoc}
 */
public DnFactory getDnFactory()
{
    return this.dnFactory;
}
/**
 * {@inheritDoc}
 */
public void setDnFactory( DnFactory dnFactory )
{
    this.dnFactory = dnFactory;
}
/**
 * {@inheritDoc}
 */
public SubentryCache getSubentryCache()
{
    return this.subentryCache;
}
/**
 * {@inheritDoc}
 */
public SubtreeEvaluator getEvaluator()
{
    return this.evaluator;
}
/**
 * {@inheritDoc}
 */
@Override
public AttributeTypeProvider getAtProvider()
{
    return this.atProvider;
}
/**
 * {@inheritDoc}
 */
@Override
public ObjectClassProvider getOcProvider()
{
    return this.ocProvider;
}
/**
 * {@inheritDoc}
 */
@Override
public TimeProvider getTimeProvider()
{
    return this.timeProvider;
}
/**
 * {@inheritDoc}
 */
@Override
public void setTimeProvider( TimeProvider timeProvider )
{
    this.timeProvider = timeProvider;
}
}