/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.processor;

import java.io.Closeable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.camel.AsyncCallback;
import org.apache.camel.AsyncProcessor;
import org.apache.camel.CamelContext;
import org.apache.camel.CamelExchangeException;
import org.apache.camel.Endpoint;
import org.apache.camel.ErrorHandlerFactory;
import org.apache.camel.Exchange;
import org.apache.camel.Navigate;
import org.apache.camel.Processor;
import org.apache.camel.Producer;
import org.apache.camel.StreamCache;
import org.apache.camel.Traceable;
import org.apache.camel.processor.aggregate.AggregationStrategy;
import org.apache.camel.processor.aggregate.CompletionAwareAggregationStrategy;
import org.apache.camel.processor.aggregate.DelegateAggregationStrategy;
import org.apache.camel.processor.aggregate.TimeoutAwareAggregationStrategy;
import org.apache.camel.spi.IdAware;
import org.apache.camel.spi.RouteContext;
import org.apache.camel.spi.TracedRouteNodes;
import org.apache.camel.spi.UnitOfWork;
import org.apache.camel.support.ServiceSupport;
import org.apache.camel.util.AsyncProcessorConverterHelper;
import org.apache.camel.util.AsyncProcessorHelper;
import org.apache.camel.util.CastUtils;
import org.apache.camel.util.EventHelper;
import org.apache.camel.util.ExchangeHelper;
import org.apache.camel.util.IOHelper;
import org.apache.camel.util.KeyValueHolder;
import org.apache.camel.util.ObjectHelper;
import org.apache.camel.util.ServiceHelper;
import org.apache.camel.util.StopWatch;
import org.apache.camel.util.concurrent.AtomicException;
import org.apache.camel.util.concurrent.AtomicExchange;
import org.apache.camel.util.concurrent.SubmitOrderedCompletionService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.camel.util.ObjectHelper.notNull;

/**
 * Implements the Multicast pattern to send a message exchange to a number of
 * endpoints, each endpoint receiving a copy of the message exchange.
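 * <p/>
 * A typical use from the Java DSL looks like the following (a minimal sketch; the
 * aggregation strategy and endpoint URIs shown are placeholders, not part of this class):
 * <pre>
 *     from("direct:start")
 *         .multicast(new MyAggregationStrategy())
 *             .parallelProcessing()
 *             .to("direct:a", "direct:b", "direct:c")
 *         .end()
 *         .to("mock:result");
 * </pre>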
 *
 * @see Pipeline
 */
public class MulticastProcessor extends ServiceSupport implements AsyncProcessor, Navigate<Processor>, Traceable, IdAware {

    private static final Logger LOG = LoggerFactory.getLogger(MulticastProcessor.class);

    /**
     * Class that represents each step in the multicast route to process
     */
    static final class DefaultProcessorExchangePair implements ProcessorExchangePair {
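        // 'processor' is the original processor (possibly a Producer, used for sent-event notifications),
        // while 'prepared' is that processor wrapped with error handling and unit of work support,
        // which is what actually gets invoked for this step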
        private final int index;
        private final Processor processor;
        private final Processor prepared;
        private final Exchange exchange;

        private DefaultProcessorExchangePair(int index, Processor processor, Processor prepared, Exchange exchange) {
            this.index = index;
            this.processor = processor;
            this.prepared = prepared;
            this.exchange = exchange;
        }

        public int getIndex() {
            return index;
        }

        public Exchange getExchange() {
            return exchange;
        }

        public Producer getProducer() {
            if (processor instanceof Producer) {
                return (Producer) processor;
            }
            return null;
        }

        public Processor getProcessor() {
            return prepared;
        }

        public void begin() {
            // noop
        }

        public void done() {
            // noop
        }

    }

    /**
     * Class that represents prepared fine grained error handlers when processing multicast/split exchanges
     * <p/>
     * See the <tt>createProcessorExchangePair</tt> and <tt>createErrorHandler</tt> methods.
     */
    static final class PreparedErrorHandler extends KeyValueHolder<RouteContext, Processor> {

        PreparedErrorHandler(RouteContext key, Processor value) {
            super(key, value);
        }

    }

    protected final Processor onPrepare;
    private final CamelContext camelContext;
    private String id;
    private Collection<Processor> processors;
    private final AggregationStrategy aggregationStrategy;
    private final boolean parallelProcessing;
    private final boolean streaming;
    private final boolean parallelAggregate;
    private final boolean stopOnAggregateException;
    private final boolean stopOnException;
    private final ExecutorService executorService;
    private final boolean shutdownExecutorService;
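    // thread pool used to run the on-the-fly aggregation task when processing in parallel;
    // created in doStart() if it has not already been set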
    private ExecutorService aggregateExecutorService;
    private final long timeout;
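    // cache of error handlers created per route context and processor, so they can be reused
    // instead of creating (and starting) a new error handler for every exchange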
    private final ConcurrentMap<PreparedErrorHandler, Processor> errorHandlers = new ConcurrentHashMap<PreparedErrorHandler, Processor>();
    private final boolean shareUnitOfWork;

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors) {
        this(camelContext, processors, null);
    }

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy) {
        this(camelContext, processors, aggregationStrategy, false, null, false, false, false, 0, null, false, false);
    }

    @Deprecated
    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService,
                              boolean streaming, boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork) {
        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService,
                streaming, stopOnException, timeout, onPrepare, shareUnitOfWork, false);
    }

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy, boolean parallelProcessing,
                              ExecutorService executorService, boolean shutdownExecutorService, boolean streaming, boolean stopOnException, long timeout, Processor onPrepare,
                              boolean shareUnitOfWork, boolean parallelAggregate) {
        this(camelContext, processors, aggregationStrategy, parallelProcessing, executorService, shutdownExecutorService, streaming, stopOnException, timeout, onPrepare,
             shareUnitOfWork, parallelAggregate, false);
    }

    public MulticastProcessor(CamelContext camelContext, Collection<Processor> processors, AggregationStrategy aggregationStrategy,
                              boolean parallelProcessing, ExecutorService executorService, boolean shutdownExecutorService, boolean streaming,
                              boolean stopOnException, long timeout, Processor onPrepare, boolean shareUnitOfWork,
                              boolean parallelAggregate, boolean stopOnAggregateException) {
        notNull(camelContext, "camelContext");
        this.camelContext = camelContext;
        this.processors = processors;
        this.aggregationStrategy = aggregationStrategy;
        this.executorService = executorService;
        this.shutdownExecutorService = shutdownExecutorService;
        this.streaming = streaming;
        this.stopOnException = stopOnException;
        // must enable parallel if executor service is provided
        this.parallelProcessing = parallelProcessing || executorService != null;
        this.timeout = timeout;
        this.onPrepare = onPrepare;
        this.shareUnitOfWork = shareUnitOfWork;
        this.parallelAggregate = parallelAggregate;
        this.stopOnAggregateException = stopOnAggregateException;
    }

    @Override
    public String toString() {
        return "Multicast[" + getProcessors() + "]";
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getTraceLabel() {
        return "multicast";
    }

    public CamelContext getCamelContext() {
        return camelContext;
    }

    public void process(Exchange exchange) throws Exception {
        AsyncProcessorHelper.process(this, exchange);
    }

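    /**
     * Processes the multicast, returning <tt>true</tt> if it completed synchronously within this
     * method, or <tt>false</tt> if processing continues asynchronously, in which case the callback
     * is invoked when the multicast is done.
     */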
    public boolean process(Exchange exchange, AsyncCallback callback) {
        final AtomicExchange result = new AtomicExchange();
        Iterable<ProcessorExchangePair> pairs = null;

        try {
            boolean sync = true;

            pairs = createProcessorExchangePairs(exchange);

            if (isParallelProcessing()) {
                // ensure an executor is set when running in parallel
                ObjectHelper.notNull(executorService, "executorService", this);
                doProcessParallel(exchange, result, pairs, isStreaming(), callback);
            } else {
                sync = doProcessSequential(exchange, result, pairs, callback);
            }

            if (!sync) {
                // the remainder of the multicast will be completed async
                // so we break out now, and the callback will continue routing from where we left off
                return false;
            }
        } catch (Throwable e) {
            exchange.setException(e);
            // an unexpected exception was thrown, maybe from the iterator etc. so do not regard it as exhausted
            // and do the done work
            doDone(exchange, null, pairs, callback, true, false);
            return true;
        }

        // multicasting was processed successfully
        // and do the done work
        Exchange subExchange = result.get();
        doDone(exchange, subExchange, pairs, callback, true, true);
        return true;
    }

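    /**
     * Submits each exchange pair as a task to the completion service and lets a separate
     * aggregation task pick up and aggregate the replies on-the-fly. The calling thread
     * blocks until the on-the-fly aggregation has completed (or timed out).
     */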
    protected void doProcessParallel(final Exchange original, final AtomicExchange result, final Iterable<ProcessorExchangePair> pairs,
                                     final boolean streaming, final AsyncCallback callback) throws Exception {

        ObjectHelper.notNull(executorService, "ExecutorService", this);
        ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

        final CompletionService<Exchange> completion;
        if (streaming) {
            // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
            completion = new ExecutorCompletionService<Exchange>(executorService);
        } else {
            // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
            completion = new SubmitOrderedCompletionService<Exchange>(executorService);
        }

        final AtomicInteger total = new AtomicInteger(0);
        final Iterator<ProcessorExchangePair> it = pairs.iterator();

        if (it.hasNext()) {
            // when parallel then aggregate on the fly
            final AtomicBoolean running = new AtomicBoolean(true);
            final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
            final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
            final AtomicException executionException = new AtomicException();

            // issue task to execute in separate thread so it can aggregate on-the-fly
            // while we submit new tasks, and those tasks complete concurrently
            // this allows us to optimize work and reduce memory consumption
            final AggregateOnTheFlyTask aggregateOnTheFlyTask = new AggregateOnTheFlyTask(result, original, total, completion, running,
                    aggregationOnTheFlyDone, allTasksSubmitted, executionException);
            final AtomicBoolean aggregationTaskSubmitted = new AtomicBoolean();

            LOG.trace("Starting to submit parallel tasks");

            while (it.hasNext()) {
                final ProcessorExchangePair pair = it.next();
                // in case the iterator returns null then continue to next
                if (pair == null) {
                    continue;
                }

                final Exchange subExchange = pair.getExchange();
                updateNewExchange(subExchange, total.intValue(), pairs, it);

                completion.submit(new Callable<Exchange>() {
                    public Exchange call() throws Exception {
                        // start the aggregation task at this stage only in order not to pile up too many threads
                        if (aggregationTaskSubmitted.compareAndSet(false, true)) {
                            // but only submit the aggregation task once
                            aggregateExecutorService.submit(aggregateOnTheFlyTask);
                        }

                        if (!running.get()) {
                            // do not start processing the task if we are not running
                            return subExchange;
                        }

                        try {
                            doProcessParallel(pair);
                        } catch (Throwable e) {
                            subExchange.setException(e);
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        Integer number = getExchangeIndex(subExchange);
                        boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                        if (stopOnException && !continueProcessing) {
                            // signal to stop running
                            running.set(false);
                            // throw caused exception
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                CamelExchangeException cause = new CamelExchangeException("Parallel processing failed for number " + number, subExchange, subExchange.getException());
                                subExchange.setException(cause);
                            }
                        }

                        LOG.trace("Parallel processing complete for exchange: {}", subExchange);
                        return subExchange;
                    }
                });

                total.incrementAndGet();
            }

            // signal that all tasks have been submitted
            LOG.trace("Signaling that all {} tasks have been submitted.", total.get());
            allTasksSubmitted.set(true);

            // it is too hard to do parallel async routing, so we let the caller thread block
            // synchronously (using a latch) until the on-the-fly aggregation of the replies is done
            LOG.debug("Waiting for on-the-fly aggregation to complete aggregating {} responses for exchangeId: {}", total.get(), original.getExchangeId());
            aggregationOnTheFlyDone.await();

            // did we fail for whatever reason, if so throw that caused exception
            if (executionException.get() != null) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Parallel processing failed due to {}", executionException.get().getMessage());
                }
                throw executionException.get();
            }
        }

        // now everything is okay so we are done
        LOG.debug("Done parallel processing {} exchanges", total);
    }

    /**
     * Boss worker to control aggregate on-the-fly for completed tasks when using parallel processing.
     * <p/>
     * This ensures lower memory consumption as we do not need to keep all completed tasks in memory
     * before we perform aggregation. Instead this separate thread will run and aggregate as new
     * tasks complete.
     * <p/>
     * The logic is fairly complex as this implementation has to keep track of how far it got, and also
     * signal back to the <i>main</i> thread when it is done, so the <i>main</i> thread can continue
     * processing when the entire splitting is done.
     */
    private final class AggregateOnTheFlyTask implements Runnable {

        private final AtomicExchange result;
        private final Exchange original;
        private final AtomicInteger total;
        private final CompletionService<Exchange> completion;
        private final AtomicBoolean running;
        private final CountDownLatch aggregationOnTheFlyDone;
        private final AtomicBoolean allTasksSubmitted;
        private final AtomicException executionException;

        private AggregateOnTheFlyTask(AtomicExchange result, Exchange original, AtomicInteger total,
                                      CompletionService<Exchange> completion, AtomicBoolean running,
                                      CountDownLatch aggregationOnTheFlyDone, AtomicBoolean allTasksSubmitted,
                                      AtomicException executionException) {
            this.result = result;
            this.original = original;
            this.total = total;
            this.completion = completion;
            this.running = running;
            this.aggregationOnTheFlyDone = aggregationOnTheFlyDone;
            this.allTasksSubmitted = allTasksSubmitted;
            this.executionException = executionException;
        }

        public void run() {
            LOG.trace("Aggregate on the fly task started for exchangeId: {}", original.getExchangeId());

            try {
                aggregateOnTheFly();
            } catch (Throwable e) {
                if (e instanceof Exception) {
                    executionException.set((Exception) e);
                } else {
                    executionException.set(ObjectHelper.wrapRuntimeCamelException(e));
                }
            } finally {
                // must signal we are done so the latch can open and let the other thread continue processing
                LOG.debug("Signaling we are done aggregating on the fly for exchangeId: {}", original.getExchangeId());
                LOG.trace("Aggregate on the fly task done for exchangeId: {}", original.getExchangeId());
                aggregationOnTheFlyDone.countDown();
            }
        }

        private void aggregateOnTheFly() throws InterruptedException, ExecutionException {
            final AtomicBoolean timedOut = new AtomicBoolean();
            boolean stoppedOnException = false;
            final StopWatch watch = new StopWatch();
            final AtomicInteger aggregated = new AtomicInteger();
            boolean done = false;
            // not a for loop as on the fly may still run
            while (!done) {
                // check if we have already aggregated everything
                if (allTasksSubmitted.get() && aggregated.intValue() >= total.get()) {
                    LOG.debug("Done aggregating {} exchanges on the fly.", aggregated);
                    break;
                }

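                // polling strategy: if we have already timed out, do a non-blocking poll to drain any
                // tasks that completed anyway; if a timeout is configured, poll with the remaining time;
                // otherwise poll in one second intervals so the loop can recheck whether we are done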
                Future<Exchange> future;
                if (timedOut.get()) {
                    // we are timed out but try to grab any tasks that have already been completed
                    // poll will return null if no task is present
                    future = completion.poll();
                    LOG.trace("Polled completion task #{} after timeout to grab already completed tasks: {}", aggregated, future);
                } else if (timeout > 0) {
                    long left = timeout - watch.taken();
                    if (left < 0) {
                        left = 0;
                    }
                    LOG.trace("Polling completion task #{} using timeout {} millis.", aggregated, left);
                    future = completion.poll(left, TimeUnit.MILLISECONDS);
                } else {
                    LOG.trace("Polling completion task #{}", aggregated);
                    // we must not block so poll every second
                    future = completion.poll(1, TimeUnit.SECONDS);
                    if (future == null) {
                        // and continue loop which will recheck if we are done
                        continue;
                    }
                }

                if (future == null) {
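                    // the poll hit the timeout without a completed task, so run the timeout handling task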
                    ParallelAggregateTimeoutTask task = new ParallelAggregateTimeoutTask(original, result, completion, aggregated, total, timedOut);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode just run the task
                        task.run();
                    }
                } else {
                    // there is a result to aggregate
                    Exchange subExchange = future.get();

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    Integer number = getExchangeIndex(subExchange);
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Parallel processing failed for number " + number, LOG);
                    if (stopOnException && !continueProcessing) {
                        // we want to stop on exception and an exception or failure occurred
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and break out
                        result.set(subExchange);
                        stoppedOnException = true;
                        break;
                    }

                    // we got a result so aggregate it
                    ParallelAggregateTask task = new ParallelAggregateTask(result, subExchange, aggregated);
                    if (parallelAggregate) {
                        aggregateExecutorService.submit(task);
                    } else {
                        // in non parallel mode just run the task
                        task.run();
                    }
                }
            }

            if (timedOut.get() || stoppedOnException) {
                if (timedOut.get()) {
                    LOG.debug("Cancelling tasks due to timeout after {} millis.", timeout);
                }
                if (stoppedOnException) {
                    LOG.debug("Cancelling tasks due to stopOnException.");
                }
                // cancel tasks as we timed out (it is safe to cancel done tasks)
                running.set(false);
            }
        }
    }

    /**
     * Worker task to aggregate the old and new exchange on-the-fly for completed tasks when using parallel processing.
     */
    private final class ParallelAggregateTask implements Runnable {

        private final AtomicExchange result;
        private final Exchange subExchange;
        private final AtomicInteger aggregated;

        private ParallelAggregateTask(AtomicExchange result, Exchange subExchange, AtomicInteger aggregated) {
            this.result = result;
            this.subExchange = subExchange;
            this.aggregated = aggregated;
        }

        @Override
        public void run() {
            try {
                if (parallelAggregate) {
                    doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                } else {
                    doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                }
            } catch (Throwable e) {
                if (isStopOnAggregateException()) {
                    throw e;
                } else {
                    // wrap in exception to explain where it failed
                    CamelExchangeException cex = new CamelExchangeException("Parallel processing failed for number " + aggregated.get(), subExchange, e);
                    subExchange.setException(cex);
                    LOG.debug(cex.getMessage(), cex);
                }
            } finally {
                aggregated.incrementAndGet();
            }
        }
    }

    /**
     * Worker task to handle a timeout while aggregating on-the-fly when using parallel processing,
     * notifying the aggregation strategy (if it is timeout aware) and marking the task as timed out.
     */
    private final class ParallelAggregateTimeoutTask implements Runnable {

        private final Exchange original;
        private final AtomicExchange result;
        private final CompletionService<Exchange> completion;
        private final AtomicInteger aggregated;
        private final AtomicInteger total;
        private final AtomicBoolean timedOut;

        private ParallelAggregateTimeoutTask(Exchange original, AtomicExchange result, CompletionService<Exchange> completion,
                                             AtomicInteger aggregated, AtomicInteger total, AtomicBoolean timedOut) {
            this.original = original;
            this.result = result;
            this.completion = completion;
            this.aggregated = aggregated;
            this.total = total;
            this.timedOut = timedOut;
        }

        @Override
        public void run() {
            AggregationStrategy strategy = getAggregationStrategy(null);
            if (strategy instanceof DelegateAggregationStrategy) {
                strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
            }
            if (strategy instanceof TimeoutAwareAggregationStrategy) {
                // notify the strategy we timed out
                Exchange oldExchange = result.get();
                if (oldExchange == null) {
                    // if they all timed out the result may not have been set yet, so use the original exchange
                    oldExchange = original;
                }
                ((TimeoutAwareAggregationStrategy) strategy).timeout(oldExchange, aggregated.intValue(), total.intValue(), timeout);
            } else {
                // log a WARN that we timed out since the Exchange will not be aggregated and will be lost
                LOG.warn("Parallel processing timed out after {} millis for number {}. This task will be cancelled and will not be aggregated.", timeout, aggregated.intValue());
            }
            LOG.debug("Timeout occurred after {} millis for number {} task.", timeout, aggregated.intValue());
            timedOut.set(true);

            // mark that index as timed out, which allows us to try to retrieve
            // any already completed tasks in the next loop
            if (completion instanceof SubmitOrderedCompletionService) {
                ((SubmitOrderedCompletionService<?>) completion).timeoutTask();
            }

            // we timed out so increment the counter
            aggregated.incrementAndGet();
        }
    }

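    /**
     * Processes the exchange pairs one by one in the calling thread, aggregating each reply as it goes.
     *
     * @return <tt>true</tt> if all pairs were processed synchronously, or <tt>false</tt> if processing
     *         continues asynchronously on another thread
     */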
    protected boolean doProcessSequential(Exchange original, AtomicExchange result, Iterable<ProcessorExchangePair> pairs, AsyncCallback callback) throws Exception {
        AtomicInteger total = new AtomicInteger();
        Iterator<ProcessorExchangePair> it = pairs.iterator();

        while (it.hasNext()) {
            ProcessorExchangePair pair = it.next();
            // in case the iterator returns null then continue to next
            if (pair == null) {
                continue;
            }
            Exchange subExchange = pair.getExchange();
            updateNewExchange(subExchange, total.get(), pairs, it);

            boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);
            if (!sync) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Processing exchangeId: {} will continue being processed asynchronously", pair.getExchange().getExchangeId());
                }
                // the remainder of the multicast will be completed async
                // so we break out now, and the callback will continue routing from where we left off
                return false;
            }

            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: {} was processed synchronously", pair.getExchange().getExchangeId());
            }

            // Decide whether to continue with the multicast or not; similar logic to the Pipeline
            // remember to test for stop on exception and aggregate before copying back results
            boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
            if (stopOnException && !continueProcessing) {
                if (subExchange.getException() != null) {
                    // wrap in exception to explain where it failed
                    CamelExchangeException cause = new CamelExchangeException("Sequential processing failed for number " + total.get(), subExchange, subExchange.getException());
                    subExchange.setException(cause);
                }
                // we want to stop on exception, and the exception was handled by the error handler
                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                // so we should set the failed exchange as the result and be done
                result.set(subExchange);
                return true;
            }

            LOG.trace("Sequential processing complete for number {} exchange: {}", total, subExchange);

            if (parallelAggregate) {
                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
            } else {
                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
            }

            total.incrementAndGet();
        }

        LOG.debug("Done sequential processing {} exchanges", total);

        return true;
    }

    private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
                                        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
                                        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
        boolean sync = true;

        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        final Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            StopWatch sw = null;
            if (producer != null) {
                boolean sending = EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
                if (sending) {
                    sw = new StopWatch();
                }
            }

            // compute time taken if sending to another endpoint
            final StopWatch watch = sw;

            // let the prepared processor process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            sync = async.process(exchange, new AsyncCallback() {
                public void done(boolean doneSync) {
                    // we are done with the exchange pair
                    pair.done();

                    // okay we are done, so notify that the exchange was sent
                    if (producer != null && watch != null) {
                        long timeTaken = watch.taken();
                        Endpoint endpoint = producer.getEndpoint();
                        // emit event that the exchange was sent to the endpoint
                        EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
                    }

                    // we only have to handle async completion of the multicast
                    if (doneSync) {
                        return;
                    }

                    // continue processing the multicast asynchronously
                    Exchange subExchange = exchange;

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    // remember to test for stop on exception and aggregate before copying back results
                    boolean continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                    if (stopOnException && !continueProcessing) {
                        if (subExchange.getException() != null) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                        } else {
                            // we want to stop on exception, and the exception was handled by the error handler
                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
                            // so we should set the failed exchange as the result and be done
                            result.set(subExchange);
                        }
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    try {
                        if (parallelAggregate) {
                            doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                        } else {
                            doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                        }
                    } catch (Throwable e) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                        // and do the done work
                        doDone(original, subExchange, pairs, callback, false, true);
                        return;
                    }

                    total.incrementAndGet();

                    // maybe there are more processors to multicast
                    while (it.hasNext()) {

                        // prepare and run the next
                        ProcessorExchangePair pair = it.next();
                        subExchange = pair.getExchange();
                        updateNewExchange(subExchange, total.get(), pairs, it);
                        boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);

                        if (!sync) {
                            LOG.trace("Processing exchangeId: {} will continue being processed asynchronously", original.getExchangeId());
                            return;
                        }

                        // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                        // remember to test for stop on exception and aggregate before copying back results
                        continueProcessing = PipelineHelper.continueProcessing(subExchange, "Sequential processing failed for number " + total.get(), LOG);
                        if (stopOnException && !continueProcessing) {
                            if (subExchange.getException() != null) {
                                // wrap in exception to explain where it failed
                                subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, subExchange.getException()));
                            } else {
                                // we want to stop on exception, and the exception was handled by the error handler
                                // this is similar to what the pipeline does, so we should do the same to not surprise end users
                                // so we should set the failed exchange as the result and be done
                                result.set(subExchange);
                            }
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        // must catch any exceptions from aggregation
                        try {
                            if (parallelAggregate) {
                                doAggregateInternal(getAggregationStrategy(subExchange), result, subExchange);
                            } else {
                                doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                            }
                        } catch (Throwable e) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException("Sequential processing failed for number " + total, subExchange, e));
                            // and do the done work
                            doDone(original, subExchange, pairs, callback, false, true);
                            return;
                        }

                        total.incrementAndGet();
                    }

                    // do the done work
                    subExchange = result.get();
                    doDone(original, subExchange, pairs, callback, false, true);
                }
            });
        } finally {
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
        }

        return sync;
    }

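    /**
     * Processes a single exchange pair synchronously; this is what each worker thread runs
     * when processing in parallel mode.
     */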
    private void doProcessParallel(final ProcessorExchangePair pair) throws Exception {
        final Exchange exchange = pair.getExchange();
        Processor processor = pair.getProcessor();
        Producer producer = pair.getProducer();

        TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes() : null;

        // compute time taken if sending to another endpoint
        StopWatch watch = null;
        try {
            // prepare tracing starting from a new block
            if (traced != null) {
                traced.pushBlock();
            }

            if (producer != null) {
                boolean sending = EventHelper.notifyExchangeSending(exchange.getContext(), exchange, producer.getEndpoint());
                if (sending) {
                    watch = new StopWatch();
                }
            }
            // let the prepared processor process it, remember to begin the exchange pair
            AsyncProcessor async = AsyncProcessorConverterHelper.convert(processor);
            pair.begin();
            // we invoke it synchronously as parallel async routing is too hard
            AsyncProcessorHelper.process(async, exchange);
        } finally {
            pair.done();
            // pop the block so by next round we have the same starting point and thus the tracing looks accurate
            if (traced != null) {
                traced.popBlock();
            }
            if (producer != null && watch != null) {
                Endpoint endpoint = producer.getEndpoint();
                long timeTaken = watch.taken();
                // emit event that the exchange was sent to the endpoint
                // this is okay to do here in the finally block, as the processing is not using the async routing engine
                // (we invoke it synchronously as parallel async routing is too hard)
                EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
            }
        }
    }

    /**
     * Common work which must be done when we are done multicasting.
     * <p/>
     * This logic applies both when running synchronously and asynchronously, as there are multiple exit points
     * when using the asynchronous routing engine. And therefore we want the logic in one method instead
     * of being scattered.
     *
     * @param original     the original exchange
     * @param subExchange  the current sub exchange, can be <tt>null</tt> for the synchronous part
     * @param pairs        the pairs with the exchanges to process
     * @param callback     the callback
     * @param doneSync     the <tt>doneSync</tt> parameter to call on callback
     * @param forceExhaust whether or not error handling is exhausted
     */
    protected void doDone(Exchange original, Exchange subExchange, final Iterable<ProcessorExchangePair> pairs,
                          AsyncCallback callback, boolean doneSync, boolean forceExhaust) {

        // we are done so close the pairs iterator
        if (pairs instanceof Closeable) {
            IOHelper.close((Closeable) pairs, "pairs", LOG);
        }

        AggregationStrategy strategy = getAggregationStrategy(subExchange);
        if (strategy instanceof DelegateAggregationStrategy) {
            strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
        }
        // invoke the on completion callback
        if (strategy instanceof CompletionAwareAggregationStrategy) {
            ((CompletionAwareAggregationStrategy) strategy).onCompletion(subExchange);
        }

        // cleanup any per exchange aggregation strategy
        removeAggregationStrategyFromExchange(original);

        // we need to know if there was an exception, and if the stopOnException option was enabled
        // also we would need to know if any error handler has attempted redelivery and exhausted
        boolean stoppedOnException = false;
        boolean exception = false;
        boolean exhaust = forceExhaust || subExchange != null && (subExchange.getException() != null || ExchangeHelper.isRedeliveryExhausted(subExchange));
        if (original.getException() != null || subExchange != null && subExchange.getException() != null) {
            // there was an exception and we stopped
            stoppedOnException = isStopOnException();
            exception = true;
        }

        // must copy results at this point
        if (subExchange != null) {
            if (stoppedOnException) {
                // if we stopped due to an exception then only propagate the exception
                original.setException(subExchange.getException());
            } else {
                // copy the current result to original so it will contain the result of this EIP
                ExchangeHelper.copyResults(original, subExchange);
            }
        }

        // .. and then if there was an exception we need to configure the redelivery exhaust
        // for example the noErrorHandler will not cause redelivery exhaust so if this error
        // handler has been in use, then the exhaust would be false (if not forced)
        if (exception) {
            // multicast uses error handling on its output processors and they have tried to redeliver
            // so we shall signal back to the other error handlers that we are exhausted and they should not
            // also try to redeliver as we will then do that twice
            original.setProperty(Exchange.REDELIVERY_EXHAUSTED, exhaust);
        }

        callback.done(doneSync);
    }

    /**
     * Aggregate the {@link Exchange} with the current result.
     * This method is synchronized and is called directly when parallelAggregate is disabled (by default).
     *
     * @param strategy the aggregation strategy to use
     * @param result   the current result
     * @param exchange the exchange to be added to the result
     * @see #doAggregateInternal(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
     */
    protected synchronized void doAggregate(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
        doAggregateInternal(strategy, result, exchange);
    }

    /**
     * Aggregate the {@link Exchange} with the current result.
     * This method is unsynchronized and is called directly when parallelAggregate is enabled.
     * In all other cases, this method is called from doAggregate, which is a synchronized method.
     *
     * @param strategy the aggregation strategy to use
     * @param result   the current result
     * @param exchange the exchange to be added to the result
     * @see #doAggregate(org.apache.camel.processor.aggregate.AggregationStrategy, org.apache.camel.util.concurrent.AtomicExchange, org.apache.camel.Exchange)
     */
    protected void doAggregateInternal(AggregationStrategy strategy, AtomicExchange result, Exchange exchange) {
        if (strategy != null) {
            // prepare the exchanges for aggregation
            Exchange oldExchange = result.get();
            ExchangeHelper.prepareAggregation(oldExchange, exchange);
            result.set(strategy.aggregate(oldExchange, exchange));
        }
    }

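    /**
     * Sets the {@link Exchange#MULTICAST_INDEX} and {@link Exchange#MULTICAST_COMPLETE} properties
     * on the given sub exchange, so downstream processors know which copy they are processing and
     * whether it is the last one.
     */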
    protected void updateNewExchange(Exchange exchange, int index, Iterable<ProcessorExchangePair> allPairs,
                                     Iterator<ProcessorExchangePair> it) {
        exchange.setProperty(Exchange.MULTICAST_INDEX, index);
        if (it.hasNext()) {
            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.FALSE);
        } else {
            exchange.setProperty(Exchange.MULTICAST_COMPLETE, Boolean.TRUE);
        }
    }

    protected Integer getExchangeIndex(Exchange exchange) {
        return exchange.getProperty(Exchange.MULTICAST_INDEX, Integer.class);
    }

    protected Iterable<ProcessorExchangePair> createProcessorExchangePairs(Exchange exchange) throws Exception {
        List<ProcessorExchangePair> result = new ArrayList<ProcessorExchangePair>(processors.size());

        StreamCache streamCache = null;
        if (isParallelProcessing() && exchange.getIn().getBody() instanceof StreamCache) {
            // in the parallel processing case the stream must be copied, therefore get hold of the stream cache
            streamCache = (StreamCache) exchange.getIn().getBody();
        }

        int index = 0;
        for (Processor processor : processors) {
            // copy exchange, and do not share the unit of work
            Exchange copy = ExchangeHelper.createCorrelatedCopy(exchange, false);

            if (streamCache != null) {
                if (index > 0) {
                    // copy it otherwise parallel processing is not possible,
                    // because streams can only be read once
                    StreamCache copiedStreamCache = streamCache.copy(copy);
                    if (copiedStreamCache != null) {
                        copy.getIn().setBody(copiedStreamCache);
                    }
                }
            }

            // If the multicast processor has an aggregation strategy
            // then the StreamCache created by the child routes must not be
            // closed by the unit of work of the child route, but by the unit of
            // work of the parent route or grandparent route (and so on, in case of nesting).
            // Therefore set the unit of work of the parent route as the stream cache unit of work,
            // if it is not already set.
            if (copy.getProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK) == null) {
                copy.setProperty(Exchange.STREAM_CACHE_UNIT_OF_WORK, exchange.getUnitOfWork());
            }
            // if we share the unit of work, we need to prepare the child exchange
            if (isShareUnitOfWork()) {
                prepareSharedUnitOfWork(copy, exchange);
            }

            // and add the pair
            RouteContext routeContext = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getRouteContext() : null;
            result.add(createProcessorExchangePair(index++, processor, copy, routeContext));
        }

        if (exchange.getException() != null) {
            // force any exceptions that occurred during creation of the exchange pairs to be thrown
            // before returning the answer
            throw exchange.getException();
        }

        return result;
    }

    /**
     * Creates the {@link ProcessorExchangePair} which holds the processor and exchange to be sent out.
     * <p/>
     * You <b>must</b> use this method to create the instances of {@link ProcessorExchangePair} as they
     * need to be specially prepared before use.
     *
     * @param index        the index
     * @param processor    the processor
     * @param exchange     the exchange
     * @param routeContext the route context
     * @return prepared for use
     */
    protected ProcessorExchangePair createProcessorExchangePair(int index, Processor processor, Exchange exchange,
                                                                RouteContext routeContext) {
        Processor prepared = processor;

        // set property which endpoint we send to
        setToEndpoint(exchange, prepared);

        // rework error handling to support fine grained error handling
        prepared = createErrorHandler(routeContext, exchange, prepared);

        // invoke on prepare on the exchange if specified
        if (onPrepare != null) {
            try {
                onPrepare.process(exchange);
            } catch (Exception e) {
                exchange.setException(e);
            }
        }
        return new DefaultProcessorExchangePair(index, processor, prepared, exchange);
    }

    protected Processor createErrorHandler(RouteContext routeContext, Exchange exchange, Processor processor) {
        Processor answer;

        boolean tryBlock = exchange.getProperty(Exchange.TRY_ROUTE_BLOCK, false, boolean.class);

        // do not wrap in error handler if we are inside a try block
        if (!tryBlock && routeContext != null) {
            // wrap the producer in an error handler so we have fine-grained error handling on
            // the output side instead of the input side
            // this is needed to support redelivery on that output alone, instead of redelivering
            // the entire multicast block, which would start from scratch again

            // create key for cache
            final PreparedErrorHandler key = new PreparedErrorHandler(routeContext, processor);

            // lookup cached first to reuse and preserve memory
            answer = errorHandlers.get(key);
            if (answer != null) {
                LOG.trace("Using existing error handler for: {}", processor);
                return answer;
            }

            LOG.trace("Creating error handler for: {}", processor);
            ErrorHandlerFactory builder = routeContext.getRoute().getErrorHandlerBuilder();
            // create the error handler directly to keep it lightweight,
            // instead of using ProcessorDefinition.wrapInErrorHandler
            try {
                processor = builder.createErrorHandler(routeContext, processor);

                // and wrap in a unit of work processor so the copied exchange can also run under UoW
                answer = createUnitOfWorkProcessor(routeContext, processor, exchange);

                boolean child = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class) != null;

                // must start the error handler
                ServiceHelper.startServices(answer);

                // here we don't cache the child unit of work
                if (!child) {
                    // add to cache
                    errorHandlers.putIfAbsent(key, answer);
                }

            } catch (Exception e) {
                throw ObjectHelper.wrapRuntimeCamelException(e);
            }
        } else {
            // and wrap in a unit of work processor so the copied exchange can also run under UoW
            answer = createUnitOfWorkProcessor(routeContext, processor, exchange);
        }

        return answer;
    }
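
    // Illustrative sketch (an assumption about typical usage, not part of this class): because each output
    // is wrapped in its own error handler, redelivery configured on the route applies to the failing output
    // only, rather than re-running the whole multicast block, e.g.:
    //
    //   errorHandler(defaultErrorHandler().maximumRedeliveries(3));
    //
    //   from("direct:start")
    //       .multicast()
    //           .to("direct:a", "direct:b")
    //       .end();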

    /**
     * Strategy to create the unit of work to be used for the sub route
     *
     * @param routeContext the route context
     * @param processor    the processor
     * @param exchange     the exchange
     * @return the unit of work processor
     */
    protected Processor createUnitOfWorkProcessor(RouteContext routeContext, Processor processor, Exchange exchange) {
        CamelInternalProcessor internal = new CamelInternalProcessor(processor);

        // wrap it in a unit of work so the UoW is on top and the entire sub route runs in the same UoW
        UnitOfWork parent = exchange.getProperty(Exchange.PARENT_UNIT_OF_WORK, UnitOfWork.class);
        if (parent != null) {
            internal.addAdvice(new CamelInternalProcessor.ChildUnitOfWorkProcessorAdvice(routeContext, parent));
        } else {
            internal.addAdvice(new CamelInternalProcessor.UnitOfWorkProcessorAdvice(routeContext));
        }

        return internal;
    }

    /**
     * Prepares the exchange for participating in a shared unit of work
     * <p/>
     * This ensures a child exchange can access its parent {@link UnitOfWork} when it participates
     * in a shared unit of work.
     *
     * @param childExchange  the child exchange
     * @param parentExchange the parent exchange
     */
    protected void prepareSharedUnitOfWork(Exchange childExchange, Exchange parentExchange) {
        childExchange.setProperty(Exchange.PARENT_UNIT_OF_WORK, parentExchange.getUnitOfWork());
    }
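
    // A minimal route sketch (illustrative only): the shared unit of work is typically enabled from the
    // Java DSL, which causes the parent UnitOfWork to be propagated to the child exchanges as above:
    //
    //   from("direct:start")
    //       .multicast().shareUnitOfWork()
    //           .to("direct:a", "direct:b")
    //       .end();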

    protected void doStart() throws Exception {
        if (isParallelProcessing() && executorService == null) {
            throw new IllegalArgumentException("ParallelProcessing is enabled but ExecutorService has not been set");
        }
        if (timeout > 0 && !isParallelProcessing()) {
            throw new IllegalArgumentException("Timeout is used but ParallelProcessing has not been enabled");
        }
        if (isParallelProcessing() && aggregateExecutorService == null) {
            // use an unbounded thread pool so the on-the-fly aggregate task is always assigned a thread
            // and can run as soon as it is submitted. Otherwise the aggregate task may not be able to run
            // and signal completion during processing, which would appear as a dead-lock or very slow processing
            String name = getClass().getSimpleName() + "-AggregateTask";
            aggregateExecutorService = createAggregateExecutorService(name);
        }
        ServiceHelper.startServices(aggregationStrategy, processors);
    }
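
    // A hedged DSL sketch of the constraints validated above: a timeout requires parallel processing,
    // and parallel processing requires an executor service (which the DSL normally arranges when
    // parallelProcessing() is used):
    //
    //   from("direct:start")
    //       .multicast()
    //           .parallelProcessing().timeout(5000)
    //           .to("direct:a", "direct:b")
    //       .end();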

    /**
     * Strategy to create the thread pool for the aggregator background task which waits for and aggregates
     * completed tasks when running in parallel mode.
     *
     * @param name  the suggested name for the background thread
     * @return the thread pool
     */
    protected synchronized ExecutorService createAggregateExecutorService(String name) {
        // use a cached thread pool so each on-the-fly task has a dedicated thread to process completions as they come in
        return camelContext.getExecutorServiceManager().newCachedThreadPool(this, name);
    }
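
    // A hypothetical subclass could override this strategy method, for example to customize the thread
    // name, while keeping an unbounded (cached) pool as doStart() expects for on-the-fly aggregation:
    //
    //   @Override
    //   protected ExecutorService createAggregateExecutorService(String name) {
    //       return getCamelContext().getExecutorServiceManager().newCachedThreadPool(this, "Custom-" + name);
    //   }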

    @Override
    protected void doStop() throws Exception {
        ServiceHelper.stopServices(processors, errorHandlers, aggregationStrategy);
    }

    @Override
    protected void doShutdown() throws Exception {
        ServiceHelper.stopAndShutdownServices(processors, errorHandlers, aggregationStrategy);
        // only clear error handlers when shutting down
        errorHandlers.clear();

        if (shutdownExecutorService && executorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(executorService);
        }
        if (aggregateExecutorService != null) {
            getCamelContext().getExecutorServiceManager().shutdownNow(aggregateExecutorService);
        }
    }

    protected static void setToEndpoint(Exchange exchange, Processor processor) {
        if (processor instanceof Producer) {
            Producer producer = (Producer) processor;
            exchange.setProperty(Exchange.TO_ENDPOINT, producer.getEndpoint().getEndpointUri());
        }
    }

    protected AggregationStrategy getAggregationStrategy(Exchange exchange) {
        AggregationStrategy answer = null;

        // prefer to use per Exchange aggregation strategy over a global strategy
        if (exchange != null) {
            Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
            Map<Object, AggregationStrategy> map = CastUtils.cast(property);
            if (map != null) {
                answer = map.get(this);
            }
        }
        if (answer == null) {
            // fallback to global strategy
            answer = getAggregationStrategy();
        }
        return answer;
    }
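
    // A rough sketch (illustrative only): the global fallback strategy is commonly supplied from the
    // Java DSL; "MyAggregationStrategy" is a hypothetical AggregationStrategy implementation:
    //
    //   from("direct:start")
    //       .multicast(new MyAggregationStrategy())
    //           .to("direct:a", "direct:b")
    //       .end();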

    /**
     * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}.
     *
     * @param exchange            the exchange
     * @param aggregationStrategy the strategy
     */
    protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) {
        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
        if (map == null) {
            map = new ConcurrentHashMap<Object, AggregationStrategy>();
        } else {
            // it is not safe to use the map directly as the exchange does not hold a deep copy of its properties,
            // so we create a new copy when we need to change the map
            map = new ConcurrentHashMap<Object, AggregationStrategy>(map);
        }
        // store the strategy using this processor as the key
        // (so we can store multiple strategies on the same exchange)
        map.put(this, aggregationStrategy);
        exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map);
    }

    /**
     * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange},
     * which must be done after use.
     *
     * @param exchange the current exchange
     */
    protected void removeAggregationStrategyFromExchange(Exchange exchange) {
        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
        if (map == null) {
            return;
        }
        // remove the strategy using this processor as the key
        map.remove(this);
    }

    /**
     * Is the multicast processor working in streaming mode?
     * <p/>
     * In streaming mode:
     * <ul>
     * <li>we use {@link Iterable} to ensure we can send messages as soon as the data becomes available</li>
     * <li>for parallel processing, we start aggregating responses as they are sent back to the processor;
     * this means the {@link org.apache.camel.processor.aggregate.AggregationStrategy} has to take care of handling out-of-order arrival of exchanges</li>
     * </ul>
     */
    public boolean isStreaming() {
        return streaming;
    }
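
    // Streaming mode is typically enabled from the Java DSL; a hedged sketch:
    //
    //   from("direct:start")
    //       .multicast().streaming()
    //           .to("direct:a", "direct:b")
    //       .end();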

    /**
     * Should the multicast processor stop processing further exchanges if an exception occurs?
     */
    public boolean isStopOnException() {
        return stopOnException;
    }
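
    // Likewise stopOnException is usually enabled via the DSL; a hedged sketch:
    //
    //   from("direct:start")
    //       .multicast().stopOnException()
    //           .to("direct:a", "direct:b")
    //       .end();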

    /**
     * Returns the processors to multicast to
     */
    public Collection<Processor> getProcessors() {
        return processors;
    }

    /**
     * An optional timeout in millis when using parallel processing
     */
    public long getTimeout() {
        return timeout;
    }

    /**
     * Use {@link #getAggregationStrategy(org.apache.camel.Exchange)} instead.
     */
    public AggregationStrategy getAggregationStrategy() {
        return aggregationStrategy;
    }

    public boolean isParallelProcessing() {
        return parallelProcessing;
    }

    public boolean isParallelAggregate() {
        return parallelAggregate;
    }

    public boolean isStopOnAggregateException() {
        return stopOnAggregateException;
    }

    public boolean isShareUnitOfWork() {
        return shareUnitOfWork;
    }

    public List<Processor> next() {
        if (!hasNext()) {
            return null;
        }
        return new ArrayList<Processor>(processors);
    }

    public boolean hasNext() {
        return processors != null && !processors.isEmpty();
    }
}