add some pyaudio test files

file:a/snd.py -> file:b/snd.py
--- a/snd.py
+++ b/snd.py
@@ -126,7 +126,7 @@
     data = pack('<' + ('h'*len(data)), *data)
 
     wf = wave.open(path, 'wb')
-    wf.setnchannels(1)
+    wf.setnchannels(CHANNELS)
     wf.setsampwidth(sample_width)
     wf.setframerate(RATE)
     wf.writeframes(data)

file:b/test.py (new)
--- /dev/null
+++ b/test.py
@@ -0,0 +1,44 @@
+"""PyAudio example: Record a few seconds of audio and save to a WAVE file."""
 
+import pyaudio
+import wave
+
+CHUNK = 1024
+FORMAT = pyaudio.paInt16
+CHANNELS = 2
+RATE = 44100
+RECORD_SECONDS = 5
+WAVE_OUTPUT_FILENAME = "output.wav"
+
+p = pyaudio.PyAudio()
+
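+# Open an input stream on the default recording device.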
+stream = p.open(format=FORMAT,
+                channels=CHANNELS,
+                rate=RATE,
+                input=True,
+                frames_per_buffer=CHUNK)
+
+print("* recording")
+
+frames = []
+
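+# Read CHUNK frames at a time: RATE / CHUNK reads per second for RECORD_SECONDS seconds.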
+for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
+    data = stream.read(CHUNK)
+    frames.append(data)
+
+print("* done recording")
+
+stream.stop_stream()
+stream.close()
+p.terminate()
+
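+# Save the captured frames as a 16-bit stereo WAVE file.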
+wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
+wf.setnchannels(CHANNELS)
+wf.setsampwidth(p.get_sample_size(FORMAT))
+wf.setframerate(RATE)
+wf.writeframes(b''.join(frames))
+wf.close()
+

file:b/test2.py (new)
--- /dev/null
+++ b/test2.py
@@ -0,0 +1,125 @@
+from os.path import exists
+from array import array
+from struct import unpack, pack
+
+import pyaudio
+import wave
+
+THRESHOLD = 500
+CHUNK_SIZE = 1024
+FORMAT = pyaudio.paInt16
+CHANNELS = 2
+RATE = 44100
+
+def is_silent(L):
+    "Returns `True` if below the 'silent' threshold"
+    return max(L) < THRESHOLD
+
+def normalize(L):
+    "Average the volume out"
+    MAXIMUM = 16384
+    times = float(MAXIMUM)/max(abs(i) for i in L)
+
+    LRtn = array('h')
+    for i in L:
+        LRtn.append(int(i*times))
+    return LRtn
+
+def trim(L):
+    "Trim the blank spots at the start and end"
+    def _trim(L):
+        snd_started = False
+        LRtn = array('h')
+
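+        # Skip samples until the first one above THRESHOLD, then keep everything after it.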
+        for i in L:
+            if not snd_started and abs(i) > THRESHOLD:
+                snd_started = True
+                LRtn.append(i)
+
+            elif snd_started:
+                LRtn.append(i)
+        return LRtn
+
+    # Trim to the left
+    L = _trim(L)
+
+    # Trim to the right
+    L.reverse()
+    L = _trim(L)
+    L.reverse()
+    return L
+
+def add_silence(L, seconds):
+    "Add silence to the start and end of `L` of length `seconds` (float)"
+    LRtn = array('h', [0 for i in range(int(seconds*RATE))])
+    LRtn.extend(L)
+    LRtn.extend([0 for i in range(int(seconds*RATE))])
+    return LRtn
+
+def record():
+    """
+    Record a word or words from the microphone and 
+    return the data as an array of signed shorts.
+
+    Normalizes the audio, trims silence from the 
+    start and end, and pads with 0.5 seconds of 
+    blank sound to make sure VLC et al can play 
+    it without getting chopped off.
+    """
+    p = pyaudio.PyAudio()
+    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
+                    input=True,
+                    frames_per_buffer=CHUNK_SIZE)
+
+    num_silent = 0
+    snd_started = False
+
+    LRtn = array('h')
+
+    while True:
+        data = stream.read(CHUNK_SIZE)
+        L = unpack('<' + ('h' * (len(data) // 2)), data)  # little endian, signed short
+        L = array('h', L)
+        LRtn.extend(L)
+
+        silent = is_silent(L)
+        # print(silent, num_silent, L[:10])
+
+        if silent and snd_started:
+            num_silent += 1
+        elif not silent and not snd_started:
+            snd_started = True
+
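+        # Stop once sound has been heard and 30 silent chunks (~0.7 s) have accumulated.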
+        if snd_started and num_silent > 30:
+            break
+
+    sample_width = p.get_sample_size(FORMAT)
+    stream.stop_stream()
+    stream.close()
+    p.terminate()
+
+    LRtn = normalize(LRtn)
+    LRtn = trim(LRtn)
+    LRtn = add_silence(LRtn, 0.5)
+    return sample_width, LRtn
+
+def record_to_file(path):
+    "Records from the microphone and outputs the resulting data to `path`"
+    sample_width, data = record()
+    data = pack('<' + ('h'*len(data)), *data)
+
+    wf = wave.open(path, 'wb')
+    wf.setnchannels(CHANNELS)
+    wf.setsampwidth(sample_width)
+    wf.setframerate(RATE)
+    wf.writeframes(data)
+    wf.close()
+
+if __name__ == '__main__':
+    print("please speak a word into the microphone")
+    record_to_file('demo.wav')
+    print("done - result written to demo.wav")
+