linux muduo 编译安装,muduo记录
1.muduo编译安装
编译muduo遇见的报错可以在github上的issue上面查找。一般都能顺利解决,我遇到的就是没有安装boost-devel。
centos7系统 执行: sudo yum install boost-devel
2.截取流程图
图片截取自《Linux多线程服务端编程:使用muduo C++网络库》
3.源码摘录
摘录一个examples里面的pingpong为例
server.cc 设置回调,开启服务
// Connection callback: for every newly established connection, disable
// Nagle's algorithm so small ping-pong packets go out immediately.
void onConnection(const TcpConnectionPtr& conn)
{
  if (!conn->connected())
    return;  // disconnect notification: nothing to configure
  conn->setTcpNoDelay(true);
}
// Message callback: echo every received byte straight back to the peer
// (the ping-pong benchmark); send(Buffer*) drains the input buffer.
void onMessage(const TcpConnectionPtr& conn, Buffer* buf, Timestamp)
{
conn->send(buf);
}
int main(int argc, char* argv[])
{
if (argc < 4)
{
fprintf(stderr, "Usage: server
\n");}
else
{
LOG_INFO << "pid = " << getpid() << ", tid = " << CurrentThread::tid();
Logger::setLogLevel(Logger::WARN);
const char* ip = argv[1];
uint16_t port = static_cast(atoi(argv[2]));
InetAddress listenAddr(ip, port);
int threadCount = atoi(argv[3]);
EventLoop loop;
TcpServer server(&loop, listenAddr, "PingPong");
server.setConnectionCallback(onConnection);
server.setMessageCallback(onMessage);
if (threadCount > 1)
{
server.setThreadNum(threadCount);
}
server.start();
loop.loop();
}
}
main函数中新建TcpServer 对象,TcpServer构造函数会新建Acceptor对象。Acceptor构造函数中定义了channel对象。channel中包含EventLoop。EventLoop里面根据poller来获取各种事件。
代码可以从main入手,从下往上看。这里便于理清原理,倒序表达
1.poller 2种模式之epoll
fillActiveChannels(numEvents, activeChannels);
channel->set_revents(events_[i].events);
// Creates the epoll instance (CLOEXEC so the fd is not leaked across
// exec) and pre-sizes the event buffer to kInitEventListSize entries.
EPollPoller::EPollPoller(EventLoop* loop)
: Poller(loop),
epollfd_(::epoll_create1(EPOLL_CLOEXEC)),
events_(kInitEventListSize)
{
if (epollfd_ < 0)
{
// Cannot run without an epoll fd; abort, logging errno details.
LOG_SYSFATAL << "EPollPoller::EPollPoller";
}
}
// Releases the epoll descriptor owned by this poller.
EPollPoller::~EPollPoller()
{
::close(epollfd_);
}
// Blocks in epoll_wait for up to timeoutMs, then fills |activeChannels|
// with the channels that have pending events. Returns the timestamp
// taken immediately after epoll_wait returned.
// NOTE(review): the <int>/<size_t> template arguments below were stripped
// by the blog's HTML escaping; restored to match the muduo source.
Timestamp EPollPoller::poll(int timeoutMs, ChannelList* activeChannels)
{
  LOG_TRACE << "fd total count " << channels_.size();
  int numEvents = ::epoll_wait(epollfd_,
                               &*events_.begin(),
                               static_cast<int>(events_.size()),
                               timeoutMs);
  // errno is captured before any logging call can clobber it.
  int savedErrno = errno;
  Timestamp now(Timestamp::now());
  if (numEvents > 0)
  {
    LOG_TRACE << numEvents << " events happened";
    fillActiveChannels(numEvents, activeChannels);
    // The buffer was completely filled: double it so the next poll can
    // report more events in one call.
    if (implicit_cast<size_t>(numEvents) == events_.size())
    {
      events_.resize(events_.size()*2);
    }
  }
  else if (numEvents == 0)
  {
    LOG_TRACE << "nothing happened";  // timed out
  }
  else
  {
    // error happens, log uncommon ones (EINTR is routine and ignored)
    if (savedErrno != EINTR)
    {
      errno = savedErrno;
      LOG_SYSERR << "EPollPoller::poll()";
    }
  }
  return now;
}
// Factory for the default poller: honours the MUDUO_USE_POLL environment
// variable (any non-null value selects poll(2)); otherwise uses epoll.
Poller* Poller::newDefaultPoller(EventLoop* loop)
{
  const bool usePoll = ::getenv("MUDUO_USE_POLL") != NULL;
  return usePoll ? static_cast<Poller*>(new PollPoller(loop))
                 : static_cast<Poller*>(new EPollPoller(loop));
}
void EPollPoller::fillActiveChannels(int numEvents,
ChannelList* activeChannels) const
{
assert(implicit_cast(numEvents) <= events_.size());
for (int i = 0; i < numEvents; ++i)
{
Channel* channel = static_cast(events_[i].data.ptr);
#ifndef NDEBUG
int fd = channel->fd();
ChannelMap::const_iterator it = channels_.find(fd);
assert(it != channels_.end());
assert(it->second == channel);
#endif
channel->set_revents(events_[i].events);
activeChannels->push_back(channel);
}
}
2.EventLoop 获取各种事件
poller_(Poller::newDefaultPoller(this))
pollReturnTime_ = poller_->poll(kPollTimeMs, &activeChannels_);
currentActiveChannel_->handleEvent(pollReturnTime_);
// EventLoop constructor: one loop per thread. Creates the poller and
// timer queue, plus an eventfd-backed wakeup channel so other threads
// can break this loop out of a blocking poll().
EventLoop::EventLoop()
: looping_(false),
quit_(false),
eventHandling_(false),
callingPendingFunctors_(false),
iteration_(0),
threadId_(CurrentThread::tid()),
poller_(Poller::newDefaultPoller(this)),
timerQueue_(new TimerQueue(this)),
wakeupFd_(createEventfd()),
wakeupChannel_(new Channel(this, wakeupFd_)),
currentActiveChannel_(NULL)
{
LOG_DEBUG << "EventLoop created " << this << " in thread " << threadId_;
// Enforce the one-loop-per-thread invariant via a thread-local pointer.
if (t_loopInThisThread)
{
LOG_FATAL << "Another EventLoop " << t_loopInThisThread
<< " exists in this thread " << threadId_;
}
else
{
t_loopInThisThread = this;
}
wakeupChannel_->setReadCallback(
boost::bind(&EventLoop::handleRead, this));
// we are always reading the wakeupfd
wakeupChannel_->enableReading();
}
// Runs the event loop: repeatedly poll for ready channels, dispatch
// their events, then run functors queued by other threads. Must be
// called from the loop's owning thread; returns after quit() is seen.
void EventLoop::loop()
{
assert(!looping_);
assertInLoopThread();
looping_ = true;
quit_ = false; // FIXME: what if someone calls quit() before loop() ?
LOG_TRACE << "EventLoop " << this << " start looping";
while (!quit_)
{
activeChannels_.clear();
// Blocks until events arrive or kPollTimeMs elapses.
pollReturnTime_ = poller_->poll(kPollTimeMs, &activeChannels_);
++iteration_;
if (Logger::logLevel() <= Logger::TRACE)
{
printActiveChannels();
}
// TODO sort channel by priority
eventHandling_ = true;
for (ChannelList::iterator it = activeChannels_.begin();
it != activeChannels_.end(); ++it)
{
// currentActiveChannel_ is tracked so removeChannel() can detect
// removal of the channel being handled.
currentActiveChannel_ = *it;
currentActiveChannel_->handleEvent(pollReturnTime_);
}
currentActiveChannel_ = NULL;
eventHandling_ = false;
// Run callbacks queued via runInLoop()/queueInLoop() from other threads.
doPendingFunctors();
}
LOG_TRACE << "EventLoop " << this << " stop looping";
looping_ = false;
}
3.channel 分发事件根据EventLoop
revents_ 在epoll中已经设置
// Entry point for event dispatch. If the channel is tied to an owner
// object (via tie()), promote the weak reference first so the owner
// cannot be destroyed while its callbacks are running.
// NOTE(review): the blog paste dropped the template argument on the
// guard declaration; muduo declares it as shared_ptr<void>.
void Channel::handleEvent(Timestamp receiveTime)
{
  boost::shared_ptr<void> guard;
  if (tied_)
  {
    guard = tie_.lock();
    if (guard)
    {
      handleEventWithGuard(receiveTime);
    }
    // else: the tied object is already gone; skip the callbacks.
  }
  else
  {
    handleEventWithGuard(receiveTime);
  }
}
// Dispatches the poller-reported revents_ to the registered callbacks.
// The branch order matters: close first, then error, read, write.
void Channel::handleEventWithGuard(Timestamp receiveTime)
{
eventHandling_ = true;
LOG_TRACE << reventsToString();
// POLLHUP without POLLIN: peer hung up and there is nothing left to
// read, so treat it as a close event.
if ((revents_ & POLLHUP) && !(revents_ & POLLIN))
{
if (logHup_)
{
LOG_WARN << "fd = " << fd_ << " Channel::handle_event() POLLHUP";
}
if (closeCallback_) closeCallback_();
}
// POLLNVAL: the fd is not open; logged here, handled as an error below.
if (revents_ & POLLNVAL)
{
LOG_WARN << "fd = " << fd_ << " Channel::handle_event() POLLNVAL";
}
if (revents_ & (POLLERR | POLLNVAL))
{
if (errorCallback_) errorCallback_();
}
// Normal data, urgent data, or peer half-close all go to the reader.
if (revents_ & (POLLIN | POLLPRI | POLLRDHUP))
{
if (readCallback_) readCallback_(receiveTime);
}
if (revents_ & POLLOUT)
{
if (writeCallback_) writeCallback_();
}
eventHandling_ = false;
}
4.Acceptor
loop_(loop),
acceptChannel_(loop, acceptSocket_.fd()),
acceptChannel_.setReadCallback(boost::bind(&Acceptor::handleRead, this));
// Acceptor constructor: creates a non-blocking listening socket, binds
// it to listenAddr, and wires handleRead() to run when a connection is
// pending. idleFd_ holds /dev/null open as a spare descriptor so that
// handleRead() can gracefully drain connections when fds run out (EMFILE).
Acceptor::Acceptor(EventLoop* loop, const InetAddress& listenAddr, bool reuseport)
: loop_(loop),
acceptSocket_(sockets::createNonblockingOrDie(listenAddr.family())),
acceptChannel_(loop, acceptSocket_.fd()),
listenning_(false),
idleFd_(::open("/dev/null", O_RDONLY | O_CLOEXEC))
{
assert(idleFd_ >= 0);
acceptSocket_.setReuseAddr(true);
acceptSocket_.setReusePort(reuseport);
acceptSocket_.bindAddress(listenAddr);
acceptChannel_.setReadCallback(
boost::bind(&Acceptor::handleRead, this));
}
// Read callback for the listening socket: accept one pending connection
// and hand it to newConnectionCallback_, or close it if no callback is
// registered.
void Acceptor::handleRead()
{
loop_->assertInLoopThread();
InetAddress peerAddr;
//FIXME loop until no more
int connfd = acceptSocket_.accept(&peerAddr);
if (connfd >= 0)
{
// string hostport = peerAddr.toIpPort();
// LOG_TRACE << "Accepts of " << hostport;
if (newConnectionCallback_)
{
newConnectionCallback_(connfd, peerAddr);
}
else
{
// Nobody wants the connection; close it so the peer is notified.
sockets::close(connfd);
}
}
else
{
LOG_SYSERR << "in Acceptor::handleRead";
// Read the section named "The special problem of
// accept()ing when you can't" in libev's doc.
// By Marc Lehmann, author of libev.
if (errno == EMFILE)
{
// Out of file descriptors: free the reserved idleFd_, accept the
// pending connection just to close it cleanly, then re-reserve the
// spare fd. Without this the fd stays ready and the loop busy-spins.
::close(idleFd_);
idleFd_ = ::accept(acceptSocket_.fd(), NULL, NULL);
::close(idleFd_);
idleFd_ = ::open("/dev/null", O_RDONLY | O_CLOEXEC);
}
}
}
5.TcpServer
acceptor_(new Acceptor(loop, listenAddr, option == kReusePort)),
ioLoop->runInLoop(boost::bind(&TcpConnection::connectEstablished, conn));
tcpserver 在得到新连接(newConnection)以后,会新建一个 TcpConnection 来处理后续协议报文的发送接收
// TcpServer constructor: owns an Acceptor for the listening socket and
// an EventLoopThreadPool for per-connection I/O loops. Registers
// newConnection() to run for every accepted socket.
TcpServer::TcpServer(EventLoop* loop,
const InetAddress& listenAddr,
const string& nameArg,
Option option)
: loop_(CHECK_NOTNULL(loop)),
ipPort_(listenAddr.toIpPort()),
name_(nameArg),
acceptor_(new Acceptor(loop, listenAddr, option == kReusePort)),
threadPool_(new EventLoopThreadPool(loop, name_)),
connectionCallback_(defaultConnectionCallback),
messageCallback_(defaultMessageCallback),
nextConnId_(1)
{
acceptor_->setNewConnectionCallback(
boost::bind(&TcpServer::newConnection, this, _1, _2));
}
// Called (in the acceptor's loop thread) for each accepted socket:
// picks an I/O loop from the pool round-robin, builds a named
// TcpConnection around the fd, wires user callbacks, and finishes the
// handshake on the connection's own loop thread.
void TcpServer::newConnection(int sockfd, const InetAddress& peerAddr)
{
loop_->assertInLoopThread();
EventLoop* ioLoop = threadPool_->getNextLoop();
char buf[64];
// Connection name: "<server>-<ip:port>#<seq>" — unique per server.
snprintf(buf, sizeof buf, "-%s#%d", ipPort_.c_str(), nextConnId_);
++nextConnId_;
string connName = name_ + buf;
LOG_INFO << "TcpServer::newConnection [" << name_
<< "] - new connection [" << connName
<< "] from " << peerAddr.toIpPort();
InetAddress localAddr(sockets::getLocalAddr(sockfd));
// FIXME poll with zero timeout to double confirm the new connection
// FIXME use make_shared if necessary
TcpConnectionPtr conn(new TcpConnection(ioLoop,
connName,
sockfd,
localAddr,
peerAddr));
connections_[connName] = conn;
conn->setConnectionCallback(connectionCallback_);
conn->setMessageCallback(messageCallback_);
conn->setWriteCompleteCallback(writeCompleteCallback_);
conn->setCloseCallback(
boost::bind(&TcpServer::removeConnection, this, _1)); // FIXME: unsafe
// connectEstablished must run on the connection's own I/O loop thread.
ioLoop->runInLoop(boost::bind(&TcpConnection::connectEstablished, conn));
}
// Starts the server exactly once: spins up the I/O thread pool and asks
// the owning loop to begin listening. getAndSet(1) makes repeated calls
// no-ops, so start() is safe to call more than once.
void TcpServer::start()
{
if (started_.getAndSet(1) == 0)
{
threadPool_->start(threadInitCallback_);
assert(!acceptor_->listenning());
loop_->runInLoop(
boost::bind(&Acceptor::listen, get_pointer(acceptor_)));
}
}
6.TcpConnection
注册各种IO事件
// TcpConnection constructor: wraps an already-connected sockfd in a
// Socket (RAII close) and a Channel, and registers read/write/close/
// error handlers on that channel. Enables TCP keep-alive probes.
TcpConnection::TcpConnection(EventLoop* loop,
const string& nameArg,
int sockfd,
const InetAddress& localAddr,
const InetAddress& peerAddr)
: loop_(CHECK_NOTNULL(loop)),
name_(nameArg),
state_(kConnecting),
reading_(true),
socket_(new Socket(sockfd)),
channel_(new Channel(loop, sockfd)),
localAddr_(localAddr),
peerAddr_(peerAddr),
highWaterMark_(64*1024*1024)
{
channel_->setReadCallback(
boost::bind(&TcpConnection::handleRead, this, _1));
channel_->setWriteCallback(
boost::bind(&TcpConnection::handleWrite, this));
channel_->setCloseCallback(
boost::bind(&TcpConnection::handleClose, this));
channel_->setErrorCallback(
boost::bind(&TcpConnection::handleError, this));
LOG_DEBUG << "TcpConnection::ctor[" << name_ << "] at " << this
<< " fd=" << sockfd;
socket_->setKeepAlive(true);
}
7.main使用
EventLoop loop;
TcpServer server(&loop, listenAddr, "PingPong");
server.start();
loop.loop();
// Connection callback: for every newly established connection, disable
// Nagle's algorithm so small ping-pong packets go out immediately.
void onConnection(const TcpConnectionPtr& conn)
{
  if (!conn->connected())
    return;  // disconnect notification: nothing to configure
  conn->setTcpNoDelay(true);
}
// Message callback: echo every received byte straight back to the peer
// (the ping-pong benchmark); send(Buffer*) drains the input buffer.
void onMessage(const TcpConnectionPtr& conn, Buffer* buf, Timestamp)
{
conn->send(buf);
}
int main(int argc, char* argv[])
{
if (argc < 4)
{
fprintf(stderr, "Usage: server
\n");}
else
{
LOG_INFO << "pid = " << getpid() << ", tid = " << CurrentThread::tid();
Logger::setLogLevel(Logger::WARN);
const char* ip = argv[1];
uint16_t port = static_cast(atoi(argv[2]));
InetAddress listenAddr(ip, port);
int threadCount = atoi(argv[3]);
EventLoop loop;
TcpServer server(&loop, listenAddr, "PingPong");
server.setConnectionCallback(onConnection);
server.setMessageCallback(onMessage);
if (threadCount > 1)
{
server.setThreadNum(threadCount);
}
server.start();
loop.loop();
}
}
linux muduo 编译安装,muduo记录相关推荐
- ubuntu18.04 muduo编译安装
ubuntu18.04 muduo编译安装 muduo采用CMake安装 sudo apt-get install cmake muduo依赖boost库 sudo apt-get install l ...
- linux 保存编译log,(转)Linux下编译安装log4cxx
一个项目的服务器端在Linux平台下,用到了开源日志库log4cxx,这个库是apache项目的一个子库.功能很不错.下面记录下它的编译和安装过程. 第一步安装apr-1.3.8,顺序不能错,它必须首 ...
- Linux apache编译安装
Linux apache编译安装 1.下载httpd-2.2.15.tar.gz wget http://mirror.bjtu.edu.cn/apache/httpd/httpd-2.2.17.t ...
- linux如何编译boost指定库,linux下编译安装boost库
linux下编译安装boost库 先看一下系统环境 Linux o-pc 3.19.0-22-generic #22-Ubuntu SMP Tue Jun 16 17:15:15 UTC 2015 x ...
- linux PHP 编译安装参数详解
linux PHP 编译安装参数详解 ./configure --prefix=/usr/local/php --with-config-file-path=/usr/local/php/etc -- ...
- linux iptables 编译,Linux下编译安装iptables
Linux下如何编译安装iptables实例: 先卸载系统已经安装的iptables,卸载前需备份三个文档:iptables启动脚本,iptables-config配置文档,以及已经建立好的iptab ...
- linux3.10.53编译,根据官方文档在Linux下编译安装Apache
根据官方文档在Linux下编译安装Apache 前言 永远记住官方文档才是最准确的安装手册,这篇文章仅为对官方文档的解读和补充,学习提升务必阅读官方文档: http://httpd.apache.or ...
- linux php7.2编译安装,Linux下编译安装php7.2
准备工作 所有操作需要在root用户下 安装路径: /usr/local/php 安装PHP 首先要安装如下依赖包 $ yum install -y gcc gcc-c++ make zlib zl ...
- Linux下编译安装MySQL安装
Linux下编译安装MySQL安装 博主邮箱www.zzher@foxmail.com qq:1102471911 编译安装MySQL 准备工作: 1.获得以下所需的源代码包,并存放在/usr/l ...
最新文章
- git修改远程仓库地址
- HDU 2191 悼念512汶川大地震遇难同胞——珍惜现在,感恩生活(多重背包)
- 自己学习Foundation一些类
- 人工智能的发展之路,居然要从春秋时期讲起?
- 通过OWA修改密码,提示您输入的密码不符合最低安全要求
- CORS 跨域 实现思路及相关解决方案(转:http://www.cnblogs.com/sloong/p/cors.html)
- 今年怪事特别多 时代盘点09十大奇闻
- 电脑上交计算机作业怎么打开,上海交大计算机第一次作业-20210711011739.docx-原创力文档...
- 超大气APP下载页双语多国语言网站源码
- python cookie池_Python爬虫scrapy框架Cookie池(微博Cookie池)的使用
- SAP License:自动创建带内部订单预算管控的在建工程
- 蓝桥杯2020年第十一届C/C++省赛B组第二题-既约分数
- PHP CURL实现远程下载文件到本地
- 零售券商之王:零佣金的罗宾侠的400亿美金
- 初学者,学哪一种3d模型设计软件比较好?
- exchange创建邮箱组_Exchange 2010 如何创建通讯组
- 浏览器HTML5 写入文件
- 分享一个数据库在线文档系统
- windows系统卸载VMware Workstation 并删除注册表残留信息
- XenApp6.5产品BUG