import time

import gst
from twisted.internet import defer, reactor

from flumotion.common import messages, fxml, gstreamer, documentation
from flumotion.common.i18n import N_, gettexter
from flumotion.component import feedcomponent
from flumotion.component.base import watcher

import smartscale
import singledecodebin
import playlistparser

__version__ = "$Rev$"
T_ = gettexter()

37 """
38 Return a string in local time from a gstreamer timestamp value
39 """
40 return time.ctime(ts/gst.SECOND)
41
42

def videotest_gnl_src(name, start, duration, priority, pattern=None):
    src = gst.element_factory_make('videotestsrc')
    if pattern:
        src.props.pattern = pattern
    else:
        # Default to a plain black frame (videotestsrc pattern 2)
        src.props.pattern = 2
    gnlsrc = gst.element_factory_make('gnlsource', name)
    gnlsrc.props.start = start
    gnlsrc.props.duration = duration
    gnlsrc.props.media_start = 0
    gnlsrc.props.media_duration = duration
    gnlsrc.props.priority = priority
    gnlsrc.add(src)

    return gnlsrc


def audiotest_gnl_src(name, start, duration, priority, wave=None):
    src = gst.element_factory_make('audiotestsrc')
    if wave:
        src.props.wave = wave
    else:
        # Default to silence (audiotestsrc wave 4)
        src.props.wave = 4
    gnlsrc = gst.element_factory_make('gnlsource', name)
    gnlsrc.props.start = start
    gnlsrc.props.duration = duration
    gnlsrc.props.media_start = 0
    gnlsrc.props.media_duration = duration
    gnlsrc.props.priority = priority
    gnlsrc.add(src)

    return gnlsrc


def file_gnl_src(name, uri, caps, start, duration, offset, priority):
    # Assumption: SingleDecodeBin(caps, uri) decodes the file restricted to
    # the given caps; the surrounding gnlsource then plays 'duration' of
    # media starting 'offset' into the file, by analogy with the test-source
    # helpers above.
    src = singledecodebin.SingleDecodeBin(caps, uri)
    gnlsrc = gst.element_factory_make('gnlsource', name)
    gnlsrc.props.start = start
    gnlsrc.props.duration = duration
    gnlsrc.props.media_start = offset
    gnlsrc.props.media_duration = duration
    gnlsrc.props.priority = priority
    gnlsrc.add(src)

    return gnlsrc


class PlaylistProducerMedium(feedcomponent.FeedComponentMedium):
    # Minimal stub (base class assumed) so that the componentMediumClass
    # reference below resolves.
    pass


class PlaylistProducer(feedcomponent.FeedComponent):
    logCategory = 'playlist-prod'
    componentMediumClass = PlaylistProducerMedium

    def init(self):
        self.basetime = -1

        self._hasAudio = True
        self._hasVideo = True

        # The gnlcompositions that playlist items get scheduled into
        self.videocomp = None
        self.audiocomp = None

        self.videocaps = gst.Caps("video/x-raw-yuv;video/x-raw-rgb")
        self.audiocaps = gst.Caps("audio/x-raw-int;audio/x-raw-float")

        # Map playlist items to the gnlsources currently scheduled for them
        self._vsrcs = {}
        self._asrcs = {}

        self.uiState.addListKey("playlist")

    def _buildAudioPipeline(self, pipeline, src):
        audiorate = gst.element_factory_make("audiorate")
        audioconvert = gst.element_factory_make('audioconvert')
        # Use legacyresample if it is available, otherwise audioresample
        resampler = 'audioresample'
        if gstreamer.element_factory_exists('legacyresample'):
            resampler = 'legacyresample'
        audioresample = gst.element_factory_make(resampler)
        outcaps = gst.Caps(
            "audio/x-raw-int,channels=%d,rate=%d,width=16,depth=16" %
            (self._channels, self._samplerate))

        capsfilter = gst.element_factory_make("capsfilter")
        capsfilter.props.caps = outcaps

        pipeline.add(audiorate, audioconvert, audioresample, capsfilter)
        src.link(audioconvert)
        audioconvert.link(audioresample)
        audioresample.link(audiorate)
        audiorate.link(capsfilter)

        return capsfilter.get_pad('src')

    def _buildVideoPipeline(self, pipeline, src):
        outcaps = gst.Caps(
            "video/x-raw-yuv,width=%d,height=%d,framerate=%d/%d,"
            "pixel-aspect-ratio=1/1" %
            (self._width, self._height, self._framerate[0],
             self._framerate[1]))

        cspace = gst.element_factory_make("ffmpegcolorspace")
        scaler = smartscale.SmartVideoScale()
        scaler.set_caps(outcaps)
        videorate = gst.element_factory_make("videorate")
        capsfilter = gst.element_factory_make("capsfilter")
        capsfilter.props.caps = outcaps

        pipeline.add(cspace, scaler, videorate, capsfilter)

        src.link(cspace)
        cspace.link(scaler)
        scaler.link(videorate)
        videorate.link(capsfilter)
        return capsfilter.get_pad('src')

    def _buildPipeline(self):
        pipeline = gst.Pipeline()

        for mediatype in ['audio', 'video']:
            if (mediatype == 'audio' and not self._hasAudio) or (
                    mediatype == 'video' and not self._hasVideo):
                continue

            # For each media type we build:
            #   gnlcomposition -> identity sync=true ->
            #   identity single-segment=true -> conversion elements -> feeder
            # The composition's source pads appear dynamically, so they are
            # linked from the 'pad-added' callback below.
            composition = gst.element_factory_make("gnlcomposition",
                mediatype + "-composition")

            segmentidentity = gst.element_factory_make("identity")
            segmentidentity.set_property("single-segment", True)
            segmentidentity.set_property("silent", True)
            syncidentity = gst.element_factory_make("identity")
            syncidentity.set_property("silent", True)
            syncidentity.set_property("sync", True)

            pipeline.add(composition, segmentidentity, syncidentity)

            def _padAddedCb(element, pad, target):
                self.debug("Pad added, linking")
                pad.link(target)
            composition.connect('pad-added', _padAddedCb,
                                syncidentity.get_pad("sink"))
            syncidentity.link(segmentidentity)

            if mediatype == 'audio':
                self.audiocomp = composition
                srcpad = self._buildAudioPipeline(pipeline, segmentidentity)
            else:
                self.videocomp = composition
                srcpad = self._buildVideoPipeline(pipeline, segmentidentity)

            feedername = self.feeders[mediatype].elementName

            feederchunk = \
                feedcomponent.ParseLaunchComponent.FEEDER_TMPL \
                % {'name': feedername}

            binstr = "bin.(" + feederchunk + " )"
            self.debug("Parse for media composition is %s", binstr)

            bin = gst.parse_launch(binstr)
            pad = bin.find_unconnected_pad(gst.PAD_SINK)
            ghostpad = gst.GhostPad(mediatype + "-feederpad", pad)
            bin.add_pad(ghostpad)

            pipeline.add(bin)
            srcpad.link(ghostpad)

        return pipeline

    def _createDefaultSources(self, properties):
        # Fill each composition with a maximum-length, lowest-priority test
        # source so there is always something to play between playlist items.
        if self._hasVideo:
            vsrc = videotest_gnl_src("videotestdefault", 0, 2**63 - 1,
                2**31 - 1, properties.get('video-pattern', None))
            self.videocomp.add(vsrc)

        if self._hasAudio:
            asrc = audiotest_gnl_src("audiotestdefault", 0, 2**63 - 1,
                2**31 - 1, properties.get('audio-wave', None))
            self.audiocomp.add(asrc)

    def set_master_clock(self, ip, port, base_time):
        raise NotImplementedError("Playlist producer doesn't support slaving")

    def provide_master_clock(self, port):
        # This producer always provides the master clock from its own
        # pipeline rather than slaving to another component.
        if self.medium:
            ip = self.medium.getIP()
        else:
            ip = "127.0.0.1"

        clock = self.pipeline.get_clock()
        self.clock_provider = gst.NetTimeProvider(clock, None, port)
        # Start the network clock provider inactive; it is activated once the
        # pipeline is actually running.
        self.clock_provider.set_property('active', False)

        self._master_clock_info = (ip, port, self.basetime)

        return defer.succeed(self._master_clock_info)

    def get_master_clock(self):
        return self._master_clock_info

    def _setupClock(self, pipeline):
        # Use the system clock (in realtime mode) for the pipeline and record
        # the time we start as our base time.
        clock = gst.system_clock_obtain()
        clock.set_property('clock-type', 'realtime')

        self.basetime = clock.get_time()

        pipeline.use_clock(clock)
        # Disable automatic base-time distribution; we set the base time
        # ourselves so scheduled item times are relative to a known value.
        pipeline.set_new_stream_time(gst.CLOCK_TIME_NONE)

        self.debug("Setting basetime of %d", self.basetime)
        pipeline.set_base_time(self.basetime)

    def getCurrentPosition(self):
        return self.pipeline.query_position(gst.FORMAT_TIME)[0]

286 """
287 Schedule a given playlist item in our playback compositions.
288 """
289 start = item.timestamp - self.basetime
290 self.debug("Starting item %s at %d seconds from start: %s", item.uri,
291 start/gst.SECOND, _tsToString(item.timestamp))
292
293
294
295
296
297
298
299 now = self.getCurrentPosition()
300 neareststarttime = now + 5 * gst.SECOND
301
302 if start < neareststarttime:
303 if start + item.duration < neareststarttime:
304 self.debug("Item too late; skipping entirely")
305 return False
306 else:
307 change = neareststarttime - start
308 self.debug("Starting item with offset %d", change)
309 item.duration -= change
310 item.offset += change
311 start = neareststarttime
312
313 end = start + item.duration
314 timeuntilend = end - now
315
316
317 reactor.callLater(timeuntilend/gst.SECOND + 5,
318 self.unscheduleItem, item)
319
320 if self._hasVideo and item.hasVideo:
321 self.debug("Adding video source with start %d, duration %d, "
322 "offset %d", start, item.duration, item.offset)
323 vsrc = file_gnl_src(None, item.uri, self.videocaps,
324 start, item.duration, item.offset, 0)
325 self.videocomp.add(vsrc)
326 self._vsrcs[item] = vsrc
327 if self._hasAudio and item.hasAudio:
328 self.debug("Adding audio source with start %d, duration %d, "
329 "offset %d", start, item.duration, item.offset)
330 asrc = file_gnl_src(None, item.uri, self.audiocaps,
331 start, item.duration, item.offset, 0)
332 self.audiocomp.add(asrc)
333 self._asrcs[item] = asrc
334 self.debug("Done scheduling: start at %s, end at %s",
335 _tsToString(start + self.basetime),
336 _tsToString(start + self.basetime + item.duration))
337
338 self.uiState.append("playlist", (item.timestamp,
339 item.uri,
340 item.duration,
341 item.offset,
342 item.hasAudio,
343 item.hasVideo))
344 return True
345
    def unscheduleItem(self, item):
        self.debug("Unscheduling item at uri %s", item.uri)
        if self._hasVideo and item.hasVideo and item in self._vsrcs:
            vsrc = self._vsrcs.pop(item)
            self.videocomp.remove(vsrc)
            vsrc.set_state(gst.STATE_NULL)
        if self._hasAudio and item.hasAudio and item in self._asrcs:
            asrc = self._asrcs.pop(item)
            self.audiocomp.remove(asrc)
            asrc.set_state(gst.STATE_NULL)
        for entry in self.uiState.get("playlist"):
            if entry[0] == item.timestamp:
                self.uiState.remove("playlist", entry)

    def adjustItemScheduling(self, item):
        # Re-time sources that are already scheduled in the compositions
        if self._hasVideo and item.hasVideo:
            vsrc = self._vsrcs[item]
            vsrc.props.start = item.timestamp - self.basetime
            vsrc.props.duration = item.duration
            vsrc.props.media_duration = item.duration
        if self._hasAudio and item.hasAudio:
            asrc = self._asrcs[item]
            asrc.props.start = item.timestamp - self.basetime
            asrc.props.duration = item.duration
            asrc.props.media_duration = item.duration

    def create_pipeline(self):
        props = self.config['properties']

        self._playlistfile = props.get('playlist', None)
        self._playlistdirectory = props.get('playlist-directory', None)
        self._baseDirectory = props.get('base-directory', None)

        self._width = props.get('width', 320)
        self._height = props.get('height', 240)
        self._framerate = props.get('framerate', (15, 1))
        self._samplerate = props.get('samplerate', 44100)
        self._channels = props.get('channels', 2)

        self._hasAudio = props.get('audio', True)
        self._hasVideo = props.get('video', True)

        pipeline = self._buildPipeline()
        self._setupClock(pipeline)

        self._createDefaultSources(props)

        return pipeline

    def _removeParseErrorMessage(self, file):
        # Method name is assumed; this clears any previously posted
        # parse-error message for the given playlist file.
        msgid = ("playlist-parse-error", file)
        for m in self.state.get('messages'):
            if m.id == msgid:
                self.state.remove('messages', m)

    def do_check(self):
        # Verify that the gnonlin elements this component depends on are
        # installed. check_gnl here is a minimal stand-in that only logs a
        # warning; a fuller implementation would post an error message.
        def check_gnl(el):
            if not gstreamer.element_factory_exists(el):
                self.warning("Required GStreamer element '%s' is missing", el)

        for el in ["gnlsource", "gnlcomposition"]:
            check_gnl(el)
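

# Minimal usage sketch, not part of the component itself: assuming gnonlin is
# installed, it shows how the gnlsource helpers above are meant to be dropped
# into a gnlcomposition, mirroring what _createDefaultSources does. The
# 'demo-*' names are illustrative only.
if __name__ == '__main__':
    demo_comp = gst.element_factory_make('gnlcomposition', 'demo-composition')
    # A 10-second black default source at the lowest gnonlin priority
    demo_src = videotest_gnl_src('demo-default', 0, 10 * gst.SECOND,
                                 2 ** 31 - 1, None)
    demo_comp.add(demo_src)
    print demo_comp.get_name()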