/usr/include/glusterfs/event.h is provided by the glusterfs-common package, version 3.13.2-1build1.
The file is owned by root:root, with mode 0644.
The contents of the file are shown below.
/*
Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef _EVENT_H_
#define _EVENT_H_
#include <pthread.h>
struct event_pool;
struct event_ops;
struct event_slot_poll;
struct event_slot_epoll;
struct event_data {
        int idx;
        int gen;
} __attribute__ ((__packed__, __may_alias__));
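/* Callback invoked when a registered descriptor becomes ready: 'fd' is the
 * descriptor, 'idx' and 'gen' identify the registration slot and its
 * generation (handlers pass them back through event_handled()), 'data' is
 * the opaque pointer given to event_register(), and poll_in/poll_out/poll_err
 * flag the kind of readiness that fired. */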
typedef int (*event_handler_t) (int fd, int idx, int gen, void *data,
                                int poll_in, int poll_out, int poll_err);
#define EVENT_EPOLL_TABLES 1024
#define EVENT_EPOLL_SLOTS 1024
#define EVENT_MAX_THREADS 1024
struct event_pool {
        struct event_ops *ops;

        int fd;
        int breaker[2];

        int count;
        struct event_slot_poll  *reg;
        struct event_slot_epoll *ereg[EVENT_EPOLL_TABLES];
        int slots_used[EVENT_EPOLL_TABLES];

        int used;
        int changed;

        pthread_mutex_t mutex;
        pthread_cond_t cond;

        void *evcache;
        int evcache_size;

        /* NOTE: Currently used only when event processing is done using
         * epoll. */
        int eventthreadcount; /* number of event threads to execute. */

        pthread_t pollers[EVENT_MAX_THREADS]; /* poller thread_id store,
                                               * and live status */
        int destroy;
        int activethreadcount;

        /*
         * Number of threads created by auto-scaling, *in addition to* the
         * configured number of threads. This is only applicable on the
         * server, where we try to keep the number of threads around the number
         * of bricks. In that case, the configured number is just "extra"
         * threads to handle requests in excess of one per brick (including
         * requests on the GlusterD connection). For clients or GlusterD, this
         * number will always be zero, so the "extra" is all we have.
         *
         * TBD: consider auto-scaling for clients as well
         */
        int auto_thread_count;
};
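/* 'destroy' and 'activethreadcount' appear to coordinate shutdown:
 * event_dispatch_destroy() below presumably flags 'destroy' and waits on
 * 'cond' until the running pollers (at most EVENT_MAX_THREADS of them)
 * have drained. */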
struct event_destroy_data {
        int readfd;
        struct event_pool *pool;
};
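/* Presumably pairs a wakeup descriptor with the pool being torn down by
 * event_dispatch_destroy(); this is inferred from the field names, not
 * spelled out in the header. */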
struct event_ops {
        struct event_pool * (*new) (int count, int eventthreadcount);

        int (*event_register) (struct event_pool *event_pool, int fd,
                               event_handler_t handler,
                               void *data, int poll_in, int poll_out);

        int (*event_select_on) (struct event_pool *event_pool, int fd, int idx,
                                int poll_in, int poll_out);

        int (*event_unregister) (struct event_pool *event_pool, int fd, int idx);

        int (*event_unregister_close) (struct event_pool *event_pool, int fd,
                                       int idx);

        int (*event_dispatch) (struct event_pool *event_pool);

        int (*event_reconfigure_threads) (struct event_pool *event_pool,
                                          int newcount);

        int (*event_pool_destroy) (struct event_pool *event_pool);

        int (*event_handled) (struct event_pool *event_pool, int fd, int idx,
                              int gen);
};
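/* Backend dispatch table: judging by the event_slot_poll and
 * event_slot_epoll forward declarations above, one event_ops instance is
 * expected per backend (poll or epoll), with the event_* wrappers declared
 * below routing calls through event_pool->ops. */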
struct event_pool *event_pool_new (int count, int eventthreadcount);
int event_select_on (struct event_pool *event_pool, int fd, int idx,
                     int poll_in, int poll_out);
int event_register (struct event_pool *event_pool, int fd,
                    event_handler_t handler,
                    void *data, int poll_in, int poll_out);
int event_unregister (struct event_pool *event_pool, int fd, int idx);
int event_unregister_close (struct event_pool *event_pool, int fd, int idx);
int event_dispatch (struct event_pool *event_pool);
int event_reconfigure_threads (struct event_pool *event_pool, int value);
int event_pool_destroy (struct event_pool *event_pool);
int event_dispatch_destroy (struct event_pool *event_pool);
int event_handled (struct event_pool *event_pool, int fd, int idx, int gen);
#endif /* _EVENT_H_ */
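As a rough sketch of how the declarations above fit together, the example below creates an event pool, registers a socket for read events, and hands control to the dispatcher. It is illustrative only and would need to be built against the GlusterFS libraries: the handler name on_ready, the socket setup, the slot and thread counts, and the choice to pass the pool itself as the opaque data pointer are assumptions, not anything mandated by this header.

#include <stdio.h>
#include <sys/socket.h>

#include <glusterfs/event.h>

/* Hypothetical handler matching event_handler_t above. */
static int
on_ready (int fd, int idx, int gen, void *data,
          int poll_in, int poll_out, int poll_err)
{
        struct event_pool *pool = data;  /* opaque pointer from event_register() */

        if (poll_err)
                /* Drop and close a broken descriptor. */
                return event_unregister_close (pool, fd, idx);

        if (poll_in)
                printf ("fd %d is readable\n", fd);

        /* Report the event as processed, echoing back the slot and generation. */
        return event_handled (pool, fd, idx, gen);
}

int
main (void)
{
        int sock = socket (AF_INET, SOCK_STREAM, 0);

        /* Illustrative sizes: 1024 registration slots, 2 poller threads. */
        struct event_pool *pool = event_pool_new (1024, 2);

        if (sock < 0 || pool == NULL)
                return 1;

        /* Watch for incoming data only; 'pool' comes back as the handler's 'data'. */
        event_register (pool, sock, on_ready, pool, 1, 0);

        /* Start event processing on this pool. */
        return event_dispatch (pool);
}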