Base commit sha: ab7b4ca7731fcab279d67a7837e195f40ebb378a (https://github.com/apache/storm/commit/ab7b4ca7731fcab279d67a7837e195f40ebb378a)
storm-client
Config.java
/** * Configure the wait strategy used for internal queuing. Can be used to tradeoff latency * vs. throughput. * @deprecated this is no longer supported */ @Deprecated @isString public static final String TOPOLOGY_DISRUPTOR_WAIT_STRATEGY="topology.disruptor.wait.strategy";
/** * The host that the master server is running on, added only for backward compatibility; * its usage is deprecated in favor of the nimbus.seeds config. */ @Deprecated @isString public static final String NIMBUS_HOST = "nimbus.host";
/** * Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible. * @deprecated Since netty clients should never stop reconnecting, this does not make sense anymore. */ @Deprecated @isInteger public static final String STORM_MESSAGING_NETTY_MAX_RETRIES = "storm.messaging.netty.max_retries";
/** * Whether or not to use ZeroMQ for messaging in local mode. If this is set * to false, then Storm will use a pure-Java messaging system. The purpose * of this flag is to make it easy to run Storm in local mode by eliminating * the need for native dependencies, which can be difficult to install. * * Defaults to false. */ @isBoolean public static final String STORM_LOCAL_MODE_ZMQ = "storm.local.mode.zmq";
Note: storm.local.mode.zmq is not annotated as deprecated, but it has not been used for a fairly long time. It was effectively deprecated.
StormCommon.java
@Deprecated public static String getStormId(final IStormClusterState stormClusterState, final String topologyName) { return stormClusterState.getTopoId(topologyName).get(); }
LinearDRPCTopologyBuilder.java
// Trident subsumes the functionality provided by this class, so it's deprecated @Deprecated public class LinearDRPCTopologyBuilder {
INimbusCredentialPlugin.java
@Deprecated default void populateCredentials(Map<String, String> credentials, Map<String, Object> topologyConf) {
ICredentialsRenewer.java
/** * Renew any credentials that need to be renewed. (Update the credentials if needed) * @param credentials the credentials that may have something to renew. * @param topologyConf topology configuration. */ @Deprecated default void renew(Map<String, String> credentials, Map<String, Object> topologyConf) {
PlainClientCallbackHandler.java
/** * This should only ever be used for testing. It provides no security at all. * DO NOT USE THIS. The user name is the current user and the password is * "password". */ @Deprecated public class PlainClientCallbackHandler extends SimpleSaslClientCallbackHandler {
PlainSaslTransportPlugin.java
/** * This should never be used except for testing. It provides no security at all. * The password is hard coded, and even if it were not it is sent in plain text. */ @Deprecated public class PlainSaslTransportPlugin extends SaslTransportPlugin {
SaslPlainServer.java
@Deprecated public class SaslPlainServer implements SaslServer {
DefaultSerializationDelegate.java
@Deprecated public class DefaultSerializationDelegate implements SerializationDelegate {
GzipBridgeSerializationDelegate.java
/** * Always writes gzip out, but tests incoming to see if it's gzipped. If it is, deserializes with gzip. If not, uses * {@link org.apache.storm.serialization.DefaultSerializationDelegate} to deserialize. Any logic needing to be enabled * via {@link #prepare(java.util.Map)} is passed through to both delegates. */ @Deprecated public class GzipBridgeSerializationDelegate implements SerializationDelegate {
FixedTupleSpout.java
/** * @deprecated please use {@link #FixedTupleSpout(List, Fields)} */ @Deprecated public FixedTupleSpout(List tuples, String fieldName) { this(tuples, new Fields(fieldName)); }
TransactionalTopologyBuilder.java
/** * Trident subsumes the functionality provided by transactional topologies, so this * class is deprecated. * */ @Deprecated public class TransactionalTopologyBuilder {
Tuple.java
/** * Returns the global stream id (component + stream) of this tuple. * * @deprecated replaced by {@link #getSourceGlobalStreamId()} due to broken naming convention */ @Deprecated public GlobalStreamId getSourceGlobalStreamid();
Time.java
@Deprecated public static void startSimulating() {
@Deprecated public static void stopSimulating() {
TimeCacheMap.java
//deprecated in favor of non-threaded RotatingMap @Deprecated public class TimeCacheMap<K, V> {
storm-server
Testing.java
/** * Run with a local cluster * @deprecated use ``` * try (LocalCluster cluster = new LocalCluster.Builder()....build()) { * ... * } * ``` * @param clusterConf some configs to set in the cluster */ @Deprecated public static ILocalCluster getLocalCluster(Map<String, Object> clusterConf) {
/** * In a tracked topology some metrics are tracked. This provides a way to get those metrics. * This is intended mostly for internal testing. * @param id the id of the tracked cluster * @param key the name of the metric to get. * @return the metric */ @SuppressWarnings("unchecked") @Deprecated public static int globalAmt(String id, String key) {
/** * Create a tracked topology. * @deprecated use {@link org.apache.storm.testing.TrackedTopology} directly. */ @Deprecated public static TrackedTopology mkTrackedTopology(ILocalCluster cluster, StormTopology topology) {
/** * Run with a local cluster * @deprecated use ``` * try (LocalCluster cluster = new LocalCluster.Builder()....build()) { * ... * } * ``` * @param param configs to set in the cluster * @param code what to run */ @Deprecated public static void withLocalCluster(MkClusterParam param, TestJob code) {
/** * Run with a local cluster * @deprecated use ``` * try (LocalCluster cluster = new LocalCluster()) { * ... * } * ``` * @param code what to run */ @Deprecated public static void withLocalCluster(TestJob code) {
/** * Run with simulated time * @deprecated use ``` * try (Time.SimulatedTime time = new Time.SimulatedTime()) { * ... * } * ``` * @param code what to run */ @Deprecated public static void withSimulatedTime(Runnable code) {
/** * Run with a local cluster * @deprecated use ``` * try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime()....build()) { * ... * } * ``` * @param param configs to set in the cluster * @param code what to run */ @Deprecated public static void withSimulatedTimeLocalCluster(MkClusterParam param, TestJob code) {
/** * Run with a local cluster * @deprecated use ``` * try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime().build()) { * ... * } * ``` * @param code what to run */ @Deprecated public static void withSimulatedTimeLocalCluster(TestJob code) {
/** * Run with a local tracked cluster * @deprecated use ``` * try (LocalCluster cluster = new LocalCluster.Builder().withTracked()....build()) { * ... * } * ``` * @param param configs to set in the cluster * @param code what to run */ @Deprecated public static void withTrackedCluster(MkClusterParam param, TestJob code) {
/** * Run with a local cluster * @deprecated use ``` * try (LocalCluster cluster = new LocalCluster.Builder().withTracked().build()) { * ... * } * ``` * @param code what to run */ @Deprecated public static void withTrackedCluster(TestJob code) {
SchedulerAssignmentImpl.java
@Deprecated public void assign(WorkerSlot slot, Collection<ExecutorDetails> executors) {
storm-hdfs
Configs.java
/** * @deprecated please use {@link HdfsSpout#setReaderType(String)} */ @Deprecated @isString @CustomValidator(validatorClass = ReaderTypeValidator.class) public static final String READER_TYPE = "hdfsspout.reader.type"; // Required - chose the file type being consumed
/** * @deprecated please use {@link HdfsSpout#setHdfsUri(String)} */ @Deprecated @isString public static final String HDFS_URI = "hdfsspout.hdfs"; // Required - HDFS name node
/** * @deprecated please use {@link HdfsSpout#setSourceDir(String)} */ @Deprecated @isString public static final String SOURCE_DIR = "hdfsspout.source.dir"; // Required - dir from which to read files
/** * @deprecated please use {@link HdfsSpout#setArchiveDir(String)} */ @Deprecated @isString public static final String ARCHIVE_DIR = "hdfsspout.archive.dir"; // Required - completed files will be moved here
/** * @deprecated please use {@link HdfsSpout#setBadFilesDir(String)} */ @Deprecated @isString public static final String BAD_DIR = "hdfsspout.badfiles.dir"; // Required - unparsable files will be moved here
/** * @deprecated please use {@link HdfsSpout#setLockDir(String)} */ @Deprecated @isString public static final String LOCK_DIR = "hdfsspout.lock.dir"; // dir in which lock files will be created
/** * @deprecated please use {@link HdfsSpout#setCommitFrequencyCount(int)} */ @Deprecated @isInteger @isPositiveNumber(includeZero=true) public static final String COMMIT_FREQ_COUNT = "hdfsspout.commit.count"; // commit after N records. 0 disables this.
/** * @deprecated please use {@link HdfsSpout#setCommitFrequencySec(int)} */ @Deprecated @isInteger @isPositiveNumber public static final String COMMIT_FREQ_SEC = "hdfsspout.commit.sec"; // commit after N secs. cannot be disabled.
/** * @deprecated please use {@link HdfsSpout#setMaxOutstanding(int)} */ @Deprecated @isInteger @isPositiveNumber(includeZero=true) public static final String MAX_OUTSTANDING = "hdfsspout.max.outstanding";
/** * @deprecated please use {@link HdfsSpout#setLockTimeoutSec(int)} */ @Deprecated @isInteger @isPositiveNumber public static final String LOCK_TIMEOUT = "hdfsspout.lock.timeout.sec"; // inactivity duration after which locks are considered candidates for being reassigned to another spout
/** * @deprecated please use {@link HdfsSpout#setClocksInSync(boolean)} */ @Deprecated @isBoolean public static final String CLOCKS_INSYNC = "hdfsspout.clocks.insync"; // if clocks on machines in the Storm cluster are in sync
/** * @deprecated please use {@link HdfsSpout#setIgnoreSuffix(String)} */ @Deprecated @isString public static final String IGNORE_SUFFIX = "hdfsspout.ignore.suffix"; // filenames with this suffix in archive dir will be ignored by the Spout
HdfsState.java
@Deprecated public HdfsFileOptions addRotationAction(RotationAction action) {
storm-kafka
KafkaBolt.java
NOTE: We're considering removal of storm-kafka for Storm 2.0.0. Related PR: https://github.com/apache/storm/pull/2559
/** * Bolt implementation that can send Tuple data to Kafka * <p/> * It expects the producer configuration and topic in storm config under * <p/> * 'kafka.broker.properties' and 'topic' * <p/> * respectively. * <p/> * This bolt uses 0.8.2 Kafka Producer API. * <p/> * It works for sending tuples to older Kafka version (0.8.1). * @deprecated Please use the KafkaBolt in storm-kafka-client */ @Deprecated public class KafkaBolt<K, V> extends BaseTickTupleAwareRichBolt {
storm-solr
RestJsonSchemaBuilder.java
/** * Class that builds the {@link Schema} object from the JSON representation of the schema as returned by the * URL of the form http://localhost:8983/solr/gettingstarted/schema/ . This particular URL returns the schema * in JSON format for the gettingstarted example running locally. * @deprecated Use RestJsonSchemaBuilder2 instead, as this doesn't support Kerberos authentication */ @Deprecated public class RestJsonSchemaBuilder implements SchemaBuilder {