#include "WebRTCPublisher.h"
#include "BoostLog.h"
#include "IoContext.h"
#include "NetworkUtility.h"
#include <boost/json/object.hpp>
#include <boost/json/parse.hpp>
#include <com/amazonaws/kinesis/video/webrtcclient/Include.h>
#include <fstream>

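// Private implementation: owns the KVS WebRTC peer connection, its audio/video
// transceivers, and a worker thread that streams prerecorded sample frames once
// the connection is established.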
class WebRTCPublisherPrivate {
public:
    WebRTCPublisherPrivate() {
        m_exit = false;
        m_thread = std::thread(&WebRTCPublisherPrivate::run, this);
    }
    ~WebRTCPublisherPrivate() {
        m_exit = true;
        if (m_thread.joinable()) {
            m_thread.join();
        }
    }
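    // Worker loop: once the peer connection reports connected, read one H.264
    // frame and one Opus frame from the on-disk sample directories every 20 ms
    // and push them through the corresponding transceivers.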
    void run() {
        int i = 0;
        int opusIndex = 0;
        UINT64 opusT = 0;
        UINT64 t = 0;
        while (!m_exit) {
            auto begin = std::chrono::system_clock::now();

            if (i > 1500) i = 0;                // wrap around the H.264 sample set
            if (opusIndex > 618) opusIndex = 0; // wrap around the Opus sample set
            if (!m_connected) {
                std::this_thread::sleep_for(std::chrono::milliseconds(20)); // don't spin while waiting for the connection
                continue;
            }

            if (videoReceiver != nullptr) {
                std::ostringstream oss;
                oss << "/data/sdcard/lib/h264SampleFrames/frame-" << std::setfill('0') << std::setw(4) << i++ << ".h264";

                std::ifstream ifs(oss.str(), std::ifstream::binary);
                auto buffer = std::vector<uint8_t>((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>());

                Frame frame{};
                t += 40 * HUNDREDS_OF_NANOS_IN_A_MILLISECOND; // 40 ms per H.264 sample frame, in 100 ns units
                frame.presentationTs = t;
                frame.decodingTs = t;
                frame.frameData = buffer.data();
                frame.size = static_cast<UINT32>(buffer.size());
                STATUS retStatus = writeFrame(videoReceiver, &frame);
                if (retStatus != STATUS_SUCCESS) {
                    LOG(error) << "writeFrame(video) failed, status: " << retStatus;
                }
            }

            if (audioReceiver != nullptr) {
                std::ostringstream oss;
                oss << "/data/sdcard/lib/opusSampleFrames/sample-" << std::setfill('0') << std::setw(3) << opusIndex << ".opus";
                std::ifstream ifs(oss.str(), std::ifstream::binary);
                auto buffer = std::vector<uint8_t>((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>());

                Frame frame{};
                frame.index = static_cast<UINT32>(opusIndex);
                opusT += 20 * HUNDREDS_OF_NANOS_IN_A_MILLISECOND; // 20 ms per Opus sample frame, in 100 ns units
                frame.presentationTs = opusT;
                frame.decodingTs = opusT;
                frame.frameData = buffer.data();
                frame.size = static_cast<UINT32>(buffer.size());
                STATUS retStatus = writeFrame(audioReceiver, &frame);
                if (retStatus != STATUS_SUCCESS) {
                    LOG(error) << "writeFrame(audio) failed, status: " << retStatus;
                }
                LOG(info) << "send opus " << opusIndex;
                opusIndex++;
            }

            // Sleep out the remainder of the 20 ms frame interval instead of busy-waiting.
            auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - begin);
            auto remaining = std::chrono::milliseconds(20) - elapsed;
            if (remaining.count() > 0) {
                std::this_thread::sleep_for(remaining);
            }
        }
    }
    RtcPeerConnection *peer = nullptr;
    RtcRtpTransceiver *videoReceiver = nullptr;
    RtcRtpTransceiver *audioReceiver = nullptr;
    RtcSessionDescriptionInit offer;
    static void onConnectionStateChange(UINT64 userData, RTC_PEER_CONNECTION_STATE state) {
        auto self = reinterpret_cast<WebRTCPublisherPrivate *>(userData);
        LOG(info) << "connection state: " << state;
        if (state == RTC_PEER_CONNECTION_STATE_CONNECTED) {
            self->m_connected = true;
        } else if (state == RTC_PEER_CONNECTION_STATE_DISCONNECTED || state == RTC_PEER_CONNECTION_STATE_FAILED ||
                   state == RTC_PEER_CONNECTION_STATE_CLOSED) {
            self->m_connected = false; // stop sending frames when the connection drops
        }
    }

    std::thread m_thread;
    std::atomic_bool m_exit{true};       // cleared before the thread starts, set to stop it
    std::atomic_bool m_connected{false}; // flipped by onConnectionStateChange, read by run()
};

WebRTCPublisher::WebRTCPublisher(bool videoEnabled, bool audioEnabled)
    : m_d(new WebRTCPublisherPrivate()), m_videoEnabled(videoEnabled), m_audioEnabled(audioEnabled) {
}

WebRTCPublisher::~WebRTCPublisher() {
    if (m_d != nullptr) {
        // Stop the sender thread before freeing the transceivers it writes to.
        m_d->m_exit = true;
        if (m_d->m_thread.joinable()) {
            m_d->m_thread.join();
        }
        stop();
        delete m_d;
    }
}

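// Sets up the peer connection, performs the HTTP-based SDP exchange with the
// signaling server at address:port/url, and starts streaming once connected.
// Usage sketch (the endpoint values are illustrative, not defaults):
//   WebRTCPublisher publisher(/*videoEnabled=*/true, /*audioEnabled=*/true);
//   publisher.start("192.168.1.10", "8443", "/offer");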
bool WebRTCPublisher::start(const std::string &address, const std::string &port, const std::string &url) {
    using namespace Amass;
    stop();

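    // Create the peer connection. A zeroed RtcConfiguration means no STUN/TURN
    // servers are configured, so only host ICE candidates are gathered.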
    RtcConfiguration configuration{};
    STATUS status = createPeerConnection(&configuration, &m_d->peer);
    if (status != STATUS_SUCCESS) {
        LOG(error) << "createPeerConnection() failed, status: " << status;
        return false;
    }

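    // Add a send-only H.264 video transceiver.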
    if (m_videoEnabled) {
        RtcMediaStreamTrack track{};
        track.codec = RTC_CODEC_H264_PROFILE_42E01F_LEVEL_ASYMMETRY_ALLOWED_PACKETIZATION_MODE;
        track.kind = MEDIA_STREAM_TRACK_KIND_VIDEO;
        strncpy(track.streamId, "0", sizeof(track.streamId));
        strncpy(track.trackId, "0", sizeof(track.trackId));

        RtcRtpTransceiverInit init{};
        init.direction = RTC_RTP_TRANSCEIVER_DIRECTION_SENDONLY;
        status = addTransceiver(m_d->peer, &track, &init, &m_d->videoReceiver);
        if (status != STATUS_SUCCESS) {
            LOG(error) << "addTransceiver() failed, status: " << status;
            return false;
        }
    }

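    // Add a send-only Opus audio transceiver.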
    if (m_audioEnabled) {
        RtcMediaStreamTrack track{};
        track.codec = RTC_CODEC_OPUS;
        track.kind = MEDIA_STREAM_TRACK_KIND_AUDIO;
        strncpy(track.streamId, "1", sizeof(track.streamId));
        strncpy(track.trackId, "1", sizeof(track.trackId));

        RtcRtpTransceiverInit init{};
        init.direction = RTC_RTP_TRANSCEIVER_DIRECTION_SENDONLY;
        status = addTransceiver(m_d->peer, &track, &init, &m_d->audioReceiver);
        if (status != STATUS_SUCCESS) {
            LOG(error) << "addTransceiver() failed, status: " << status;
            return false;
        }
    }

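    // Create the local SDP offer and apply it as the local description.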
    status = createOffer(m_d->peer, &m_d->offer);
    if (status != STATUS_SUCCESS) {
        LOG(error) << "createOffer() failed, status: " << status;

        return false;
    }

    status = setLocalDescription(m_d->peer, &m_d->offer);
    if (status != STATUS_SUCCESS) {
        LOG(error) << "setLocalDescription() failed, status: " << status;
        return false;
    }
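    // Exchange SDP with the signaling server: POST the local offer over HTTPS
    // and expect a JSON reply whose "sdp" field carries the remote answer.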
    // LOG(info) << "sdp: \n" << m_d->offer.sdp;
    std::string sdp;

    boost::system::error_code error;
    auto ioContext = Singleton<IoContext>::instance();
    Http::Client client(*ioContext->ioContext(), Http::SSL);
    client.loadRootCertificates(error);
    auto reply = client.post(address, port, url, m_d->offer.sdp, error);
    if (error) {
        LOG(error) << "post error: " << error.message();
        return false;
    }
    auto replyValue = boost::json::parse(reply, error);
    if (error) {
        LOG(info) << reply;
        LOG(error) << "failed to parse signaling reply: " << error.message();
        return false;
    }
    auto replyObject = replyValue.as_object();
    sdp = std::string(replyObject.at("sdp").as_string());

    LOG(info) << sdp;
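    // Apply the received answer as the remote description.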
    RtcSessionDescriptionInit answer{};
    answer.type = SDP_TYPE_ANSWER;
    strncpy(answer.sdp, sdp.c_str(), sizeof(answer.sdp) - 1);
    status = setRemoteDescription(m_d->peer, &answer);
    if (status != STATUS_SUCCESS) {
        LOG(error) << "setRemoteDescription() failed, status: " << status;
        return false;
    }

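    // Register for connection-state updates; the sender thread starts streaming
    // once the state reaches CONNECTED.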
    status =
        peerConnectionOnConnectionStateChange(m_d->peer, reinterpret_cast<UINT64>(m_d), &WebRTCPublisherPrivate::onConnectionStateChange);
    if (status != STATUS_SUCCESS) {
        LOG(error) << "peerConnectionOnConnectionStateChange() failed, status: " << status;
        return false;
    }

    return true;
}

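// Tears down the peer connection and its transceivers. The sender thread keeps
// running but stops writing frames once m_connected is cleared.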
void WebRTCPublisher::stop() {
    if (m_d->peer != nullptr) {
        m_d->m_connected = false; // keep the sender loop from writing to transceivers we are about to free
        closePeerConnection(m_d->peer);
        if (m_d->videoReceiver != nullptr) {
            freeTransceiver(&m_d->videoReceiver);
            m_d->videoReceiver = nullptr;
        }
        if (m_d->audioReceiver != nullptr) {
            freeTransceiver(&m_d->audioReceiver);
            m_d->audioReceiver = nullptr;
        }

        freePeerConnection(&m_d->peer);
        m_d->peer = nullptr;
    }
}

void WebRTCPublisher::exchangeSdp() {
}