Diffstat (limited to 'WebCore/webaudio/AudioContext.h')
-rw-r--r--  WebCore/webaudio/AudioContext.h | 248
1 files changed, 248 insertions, 0 deletions
diff --git a/WebCore/webaudio/AudioContext.h b/WebCore/webaudio/AudioContext.h
new file mode 100644
index 0000000..f175bfe
--- /dev/null
+++ b/WebCore/webaudio/AudioContext.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AudioContext_h
+#define AudioContext_h
+
+#include "ActiveDOMObject.h"
+#include "AudioBus.h"
+#include "AudioDestinationNode.h"
+#include <wtf/OwnPtr.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/RefPtr.h>
+#include <wtf/Threading.h>
+#include <wtf/Vector.h>
+#include <wtf/text/AtomicStringHash.h>
+
+namespace WebCore {
+
+class AudioBuffer;
+class AudioBufferSourceNode;
+class AudioChannelMerger;
+class AudioChannelSplitter;
+class AudioGainNode;
+class AudioPannerNode;
+class AudioListener;
+class CachedAudio;
+class DelayNode;
+class Document;
+class LowPass2FilterNode;
+class HighPass2FilterNode;
+class ConvolverNode;
+class RealtimeAnalyserNode;
+class JavaScriptAudioNode;
+
+// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
+// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism.
+
+class AudioContext : public ActiveDOMObject, public RefCounted<AudioContext> {
+public:
+ static PassRefPtr<AudioContext> create(Document*);
+
+ virtual ~AudioContext();
+
+ bool isInitialized() const;
+
+ // Returns true when initialize() was called AND all asynchronous initialization has completed.
+ bool isRunnable() const;
+
+ // Document notification
+ virtual void stop();
+
+ Document* document(); // ASSERTs if document no longer exists.
+ bool hasDocument();
+
+ AudioDestinationNode* destination() { return m_destinationNode.get(); }
+ double currentTime() { return m_destinationNode->currentTime(); }
+ double sampleRate() { return m_destinationNode->sampleRate(); }
+
+ PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+
+    PassRefPtr<CachedAudio> createAudioRequest(const String& url, bool mixToMono);
+
+ // Keep track of this buffer so we can release memory after the context is shut down...
+ void refBuffer(PassRefPtr<AudioBuffer> buffer);
+
+ AudioListener* listener() { return m_listener.get(); }
+
+ // The AudioNode create methods are called on the main thread (from JavaScript).
+ PassRefPtr<AudioBufferSourceNode> createBufferSource();
+ PassRefPtr<AudioGainNode> createGainNode();
+ PassRefPtr<DelayNode> createDelayNode();
+ PassRefPtr<LowPass2FilterNode> createLowPass2Filter();
+ PassRefPtr<HighPass2FilterNode> createHighPass2Filter();
+ PassRefPtr<AudioPannerNode> createPanner();
+ PassRefPtr<ConvolverNode> createConvolver();
+ PassRefPtr<RealtimeAnalyserNode> createAnalyser();
+ PassRefPtr<JavaScriptAudioNode> createJavaScriptNode(size_t bufferSize);
+ PassRefPtr<AudioChannelSplitter> createChannelSplitter();
+ PassRefPtr<AudioChannelMerger> createChannelMerger();
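+
+    // A minimal main-thread usage sketch (illustrative only; it assumes a Document* named
+    // document and the AudioNode::connect(AudioNode*) API provided elsewhere in webaudio):
+    //
+    //     RefPtr<AudioContext> context = AudioContext::create(document);
+    //     RefPtr<AudioBufferSourceNode> source = context->createBufferSource();
+    //     RefPtr<AudioGainNode> gain = context->createGainNode();
+    //     source->connect(gain.get());
+    //     gain->connect(context->destination());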
+
+ AudioBus* temporaryMonoBus() { return m_temporaryMonoBus.get(); }
+ AudioBus* temporaryStereoBus() { return m_temporaryStereoBus.get(); }
+
+ // When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
+ void notifyNodeFinishedProcessing(AudioNode*);
+
+ // Called at the end of each render quantum.
+ void handlePostRenderTasks();
+
+ // Called periodically at the end of each render quantum to dereference finished source nodes.
+ void derefFinishedSourceNodes();
+
+ // We reap all marked nodes at the end of each realtime render quantum in deleteMarkedNodes().
+ void markForDeletion(AudioNode*);
+ void deleteMarkedNodes();
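+
+    // A rough sketch of the end-of-quantum bookkeeping this implies (the actual body is
+    // expected to live in AudioContext.cpp; only methods declared in this class are used):
+    //
+    //     void AudioContext::handlePostRenderTasks()
+    //     {
+    //         bool mustReleaseLock;
+    //         if (tryLock(mustReleaseLock)) {
+    //             derefFinishedSourceNodes();
+    //             deleteMarkedNodes();
+    //             if (mustReleaseLock)
+    //                 unlock();
+    //         }
+    //     }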
+
+ // Keeps track of the number of connections made.
+ void incrementConnectionCount()
+ {
+ ASSERT(isMainThread());
+ m_connectionCount++;
+ }
+
+ unsigned connectionCount() const { return m_connectionCount; }
+
+ //
+ // Thread Safety and Graph Locking:
+ //
+
+    void setAudioThread(ThreadIdentifier thread) { m_audioThread = thread; } // FIXME: check that m_audioThread is either not yet initialized or the same thread.
+ ThreadIdentifier audioThread() const { return m_audioThread; }
+ bool isAudioThread();
+
+ // Returns true only after the audio thread has been started and then shutdown.
+ bool isAudioThreadFinished() { return m_isAudioThreadFinished; }
+
+ // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
+ void lock(bool& mustReleaseLock);
+
+ // Returns true if we own the lock.
+ // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
+ bool tryLock(bool& mustReleaseLock);
+
+ void unlock();
+
+ // Returns true if this thread owns the context's lock.
+ bool isGraphOwner();
+
+ class AutoLocker {
+ public:
+ AutoLocker(AudioContext* context)
+ : m_context(context)
+ {
+ ASSERT(context);
+ context->lock(m_mustReleaseLock);
+ }
+
+ ~AutoLocker()
+ {
+ if (m_mustReleaseLock)
+ m_context->unlock();
+ }
+ private:
+ AudioContext* m_context;
+ bool m_mustReleaseLock;
+ };
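+
+    // Typical use from code that needs the graph lock for a scoped operation
+    // (a minimal sketch; the surrounding function is hypothetical):
+    //
+    //     void rewireNodes(AudioContext* context)
+    //     {
+    //         AudioContext::AutoLocker locker(context);
+    //         // ... the rendering graph may be safely modified here ...
+    //     }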
+
+    // AudioNode::deref() uses tryLock() before calling finishDeref(); if the lock cannot be acquired, the deferred deref is tracked here.
+ void addDeferredFinishDeref(AudioNode*, AudioNode::RefType);
+
+ // In the audio thread at the start of each render cycle, we'll call handleDeferredFinishDerefs().
+ void handleDeferredFinishDerefs();
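+
+    // The deref path in AudioNode is then roughly as follows (a sketch, assuming
+    // AudioNode::finishDeref(RefType) and AudioNode::context() as in the node classes):
+    //
+    //     bool mustReleaseLock;
+    //     if (context()->tryLock(mustReleaseLock)) {
+    //         finishDeref(refType);
+    //         if (mustReleaseLock)
+    //             context()->unlock();
+    //     } else
+    //         context()->addDeferredFinishDeref(this, refType);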
+
+private:
+ AudioContext(Document*);
+ void lazyInitialize();
+ void uninitialize();
+
+ bool m_isInitialized;
+ bool m_isAudioThreadFinished;
+ bool m_isAudioThreadShutdown;
+
+ Document* m_document;
+
+    // The context itself keeps a reference to all source nodes. The source nodes, in turn, reference all nodes they're
+    // connected to, and those nodes reference the nodes they're connected to; all nodes are ultimately connected to the
+    // AudioDestinationNode. When the context dereferences a source node, it will be deactivated from the rendering graph
+    // along with all other nodes it is uniquely connected to. See the AudioNode::ref() and AudioNode::deref() methods for
+    // more details.
+ void refNode(AudioNode*);
+ void derefNode(AudioNode*);
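+
+    // For example, createBufferSource() is expected to refNode() the new source so it stays alive
+    // while playing, and derefFinishedSourceNodes() balances this with derefNode() once the source
+    // reports itself finished (a sketch of the intended flow; the exact call sites are in the .cpp files).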
+
+ // When the context goes away, there might still be some sources which haven't finished playing.
+ // Make sure to dereference them here.
+ void derefUnfinishedSourceNodes();
+
+ RefPtr<AudioDestinationNode> m_destinationNode;
+ RefPtr<AudioListener> m_listener;
+
+ // Only accessed in the main thread.
+ Vector<RefPtr<AudioBuffer> > m_allocatedBuffers;
+
+ // Only accessed in the audio thread.
+ Vector<AudioNode*> m_finishedNodes;
+
+    // We don't use RefPtr<AudioNode> here because AudioNode has a more complex ref() / deref() implementation
+    // with an optional argument for refType, and connection references must use the special refType RefTypeConnection.
+    // Accessed either while the graph lock is held, or on the main thread when the audio thread has finished.
+ Vector<AudioNode*> m_referencedNodes;
+
+ // Accumulate nodes which need to be deleted at the end of a render cycle (in realtime thread) here.
+ Vector<AudioNode*> m_nodesToDelete;
+
+ Vector<RefPtr<CachedAudio> > m_cachedAudioReferences;
+
+ OwnPtr<AudioBus> m_temporaryMonoBus;
+ OwnPtr<AudioBus> m_temporaryStereoBus;
+
+ unsigned m_connectionCount;
+
+ // Graph locking.
+ Mutex m_contextGraphMutex;
+ volatile ThreadIdentifier m_audioThread;
+    volatile ThreadIdentifier m_graphOwnerThread; // If the lock is held, this is the thread that owns it; otherwise it is UndefinedThreadIdentifier.
+
+ // Deferred de-referencing.
+ struct RefInfo {
+ RefInfo(AudioNode* node, AudioNode::RefType refType)
+ : m_node(node)
+ , m_refType(refType)
+ {
+ }
+ AudioNode* m_node;
+ AudioNode::RefType m_refType;
+ };
+
+ // Only accessed in the audio thread.
+ Vector<RefInfo> m_deferredFinishDerefList;
+};
+
+} // WebCore
+
+#endif // AudioContext_h