- assert (len(caps))
- s = caps[0]
- name = s.get_name()
- if name.startswith('video'):
- q = Gst.ElementFactory.make('queue')
- conv = Gst.ElementFactory.make('videoconvert')
- sink = Gst.ElementFactory.make('autovideosink')
- self.pipe.add(q, conv, sink)
- self.pipe.sync_children_states()
- pad.link(q.get_static_pad('sink'))
- q.link(conv)
- conv.link(sink)
- elif name.startswith('audio'):
- q = Gst.ElementFactory.make('queue')
- conv = Gst.ElementFactory.make('audioconvert')
- resample = Gst.ElementFactory.make('audioresample')
- sink = Gst.ElementFactory.make('autoaudiosink')
- self.pipe.add(q, conv, resample, sink)
- self.pipe.sync_children_states()
- pad.link(q.get_static_pad('sink'))
- q.link(conv)
- conv.link(resample)
- resample.link(sink)
-
- async def listen_to_gstreamer_bus(self):
- Gst.init(None)
- self.webrtcbin = Gst.ElementFactory.make('webrtcbin', 'laplace')
- self.pipe = Gst.Pipeline.new("pipeline")
- Gst.Bin.do_add_element(self.pipe, self.webrtcbin)
+ # Interior of a pad-added/decodebin callback (enclosing def is outside this
+ # hunk). Builds a branch of self.pipe for each caps structure on the new pad.
+ # NOTE(review): indentation in this patch has been flattened in transit;
+ # the structure below (for -> if/elif) must be re-indented on apply.
+ padsize = caps.get_size()
+
+ log.info(f'>>>> {padsize} {caps}')
+
+ # NOTE(review): a decodebin src pad normally carries a single caps
+ # structure; iterating all of them may build duplicate branches — confirm.
+ for i in range(padsize):
+ s = caps.get_structure(i) # Gst.Structure
+ name = s.get_name()
+ log.info(f'###### {name}')
+ if name.startswith('video'):
+ # Video branch: queue -> videoconvert -> x264enc -> capsfilter ->
+ # flvmux -> rtmpsink (pushes H.264/FLV to an RTMP server).
+ q = Gst.ElementFactory.make('queue')
+ conv = Gst.ElementFactory.make('videoconvert')
+ enc = Gst.ElementFactory.make('x264enc')
+ enc.set_property('bitrate', 1000)
+ enc.set_property('tune', 'zerolatency')
+ capsfilter = Gst.ElementFactory.make('capsfilter')
+ # NOTE(review): set_properties() takes name/value pairs; passing a
+ # Gst.Caps alone is almost certainly wrong — this likely should be
+ # capsfilter.set_property('caps', Gst.Caps.from_string(...)).
+ # Confirm against the GStreamer Python bindings docs.
+ capsfilter.set_properties(Gst.Caps.from_string('video/x-h264,stream-format=(string)avc'))
+ flmux = Gst.ElementFactory.make('flvmux')
+ sink = Gst.ElementFactory.make('rtmpsink')
+ # Hard-coded RTMP target on the local network — should move to config.
+ sink.set_property('location', 'rtmp://192.168.1.46:1935/gregoa')
+ # sink.set_property('location', 'rtmp://bla:1936/gregoa')
+ print(sink.props.location, dir(sink.props))
+ # ElementFactory.make returns None when a plugin is missing; fail fast.
+ assert q and conv and enc and capsfilter and flmux and sink
+
+ self.pipe.add(q)
+ self.pipe.add(conv)
+ self.pipe.add(enc)
+ self.pipe.add(capsfilter)
+ self.pipe.add(flmux)
+ self.pipe.add(sink)
+ # Bring the freshly-added elements up to the pipeline's current state.
+ self.pipe.sync_children_states()
+
+ q_pad_sink = q.get_static_pad('sink')
+ assert q_pad_sink
+ # Link the dynamic src pad into the head of the new branch.
+ pad_link_return = pad.link(q_pad_sink)
+ assert pad_link_return == Gst.PadLinkReturn.OK
+
+ # ok = element.link(q)
+ # assert ok
+
+ ok = q.link(conv)
+ assert ok
+ ok = conv.link(enc)
+ assert ok
+ ok = enc.link(capsfilter)
+ assert ok
+ ok = capsfilter.link(flmux)
+ assert ok
+ ok = flmux.link(sink)
+ assert ok
+ self.pipe.set_state(Gst.State.PLAYING)
+ #print(dir(Gst.DebugGraphDetails))
+ #Gst.debug_bin_to_dot_data(element, Gst.DebugGraphDetails.ALL)
+
+ elif name.startswith('audio'):
+ # Audio branch: queue -> audioconvert -> audioresample -> autoaudiosink.
+ # NOTE(review): unlike the video branch, no asserts on make()/link()
+ # results and no set_state() here — failures would pass silently.
+ q = Gst.ElementFactory.make('queue')
+ conv = Gst.ElementFactory.make('audioconvert')
+ resample = Gst.ElementFactory.make('audioresample')
+ sink = Gst.ElementFactory.make('autoaudiosink')
+ self.pipe.add(q)
+ self.pipe.add(conv)
+ self.pipe.add(resample)
+ self.pipe.add(sink)
+ self.pipe.sync_children_states()
+ pad.link(q.get_static_pad('sink'))
+ q.link(conv)
+ conv.link(resample)
+ resample.link(sink)
+
+ def set_remote_desciption_done(self, gst_promise):
+ # Promise callback: the remote SDP offer has been applied to webrtcbin,
+ # so ask it to create an SDP answer next.
+ # NOTE(review): "desciption" is a typo; renaming requires updating the
+ # (unseen) caller that registered this callback.
+ # The incoming promise is intentionally discarded — a fresh promise
+ # carries the next step of the chain (create_answer_done).
+ gst_promise = Gst.Promise.new_with_change_func(self.create_answer_done)
+ self.webrtcbin.emit('create-answer', None, gst_promise)
+
+ def create_answer_done(self, gst_promise):
+ # Promise callback for webrtcbin's 'create-answer': extract the SDP
+ # answer, publish mids/ice-ufrags and the answer text to self.events,
+ # then set the answer as webrtcbin's local description.
+ reply = gst_promise.get_reply()
+ # 'answer' is a GstWebRTCSessionDescription; .sdp is the GstSDPMessage.
+ answer = reply.get_value('answer')
+ sdp_message = answer.sdp
+ # Collect the media-level 'mid' and 'ice-ufrag' attributes per m-line.
+ mids = [sdp_message.get_media(i).get_attribute_val('mid')
+ for i in range(sdp_message.medias_len())]
+ user_fragments = [sdp_message.get_media(i).get_attribute_val('ice-ufrag')
+ for i in range(sdp_message.medias_len())]
+ # put_nowait: these queues are consumed elsewhere; assumed unbounded.
+ self.events.sdp_info.put_nowait((mids, user_fragments))
+ sdp_answer = sdp_message.as_text()
+ log.info(f'Send SDP answer')
+ log.debug(f'SDP answer:\n{sdp_answer}')
+ self.events.sdp_answer.put_nowait(sdp_answer)
+ # Continue the chain: apply the answer locally, then finish in
+ # set_local_description_done.
+ gst_promise = Gst.Promise.new_with_change_func(self.set_local_description_done)
+ self.webrtcbin.emit('set-local-description', answer, gst_promise)
+
+ def set_local_description_done(self, gst_promise):
+ # Final promise callback of the offer/answer chain; the reply is
+ # fetched to complete the promise but its contents are unused.
+ gst_promise.get_reply()
+
+ async def run(self):