void ThreadManager::Impl::addWorker(size_t value) {
std::set<shared_ptr<Thread> > newThreads;
for (size_t ix = 0; ix < value; ix++) {
- class ThreadManager::Worker;
shared_ptr<ThreadManager::Worker> worker = shared_ptr<ThreadManager::Worker>(new ThreadManager::Worker(this));
newThreads.insert(threadFactory_->newThread(worker));
}
private:
shared_ptr<Runnable> runnable_;
- class TimerManager::Dispatcher;
friend class TimerManager::Dispatcher;
STATE state_;
};
} else if( ((offset_ + readState_.bufferPtr_ - 4)/chunkSize_) !=
((offset_ + readState_.bufferPtr_ + readState_.event_->eventSize_ - 1)/chunkSize_) ) {
// 3. size indicates that event crosses chunk boundary
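// For illustration, assuming a chunkSize_ of 1024: a record whose 4-byte
// length header starts at absolute offset 1020 (1020/1024 == chunk 0) but
// whose 100-byte payload ends at offset 1123 (1123/1024 == chunk 1) would
// span two chunks. The writer never emits such an event, so it is corrupt.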
- T_ERROR("Read corrupt event. Event crosses chunk boundary. Event size:%u Offset:%ld",
- readState_.event_->eventSize_, offset_ + readState_.bufferPtr_ + 4);
+ T_ERROR("Read corrupt event. Event crosses chunk boundary. Event size:%u Offset:%lld",
+ readState_.event_->eventSize_,
+ (long long int) (offset_ + readState_.bufferPtr_ + 4));
+
return true;
}
readState_.resetState(readState_.lastDispatchPtr_);
currentEvent_ = NULL;
char errorMsg[1024];
- sprintf(errorMsg, "TFileTransport: log file corrupted at offset: %lu",
- offset_ + readState_.lastDispatchPtr_);
+ sprintf(errorMsg, "TFileTransport: log file corrupted at offset: %lld",
+ (long long int) (offset_ + readState_.lastDispatchPtr_));
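// printf has no portable conversion specifier for off_t, so the offset is
// widened to long long and printed with %lld, the same pattern as the
// T_ERROR call above.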
+
GlobalOutput(errorMsg);
throw TTransportException(errorMsg);
}
// cannot seek past EOF
bool seekToEnd = false;
- uint32_t minEndOffset = 0;
+ off_t minEndOffset = 0;
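// Using off_t keeps this value in the file-offset type, so end offsets past
// 4 GB are not truncated (assuming a 64-bit off_t, e.g. _FILE_OFFSET_BITS=64).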
if (chunk >= numChunks) {
T_DEBUG("Trying to seek past EOF. Seeking to EOF instead...");
seekToEnd = true;
}
uint32_t TZlibTransport::read(uint8_t* buf, uint32_t len) {
- int need = len;
+ uint32_t need = len;
// TODO(dreiss): Skip urbuf on big reads.
while (true) {
// Copy out whatever we have available, then give them the min of
// what we have and what they want, then advance indices.
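// For example, if readAvail() is 3 and need is 10, give is 3: the memcpy
// below copies those 3 bytes and need drops to 7 for the next pass.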
- int give = std::min(readAvail(), need);
+ uint32_t give = std::min((uint32_t) readAvail(), need);
memcpy(buf, urbuf_ + urpos_, give);
need -= give;
buf += give;
// zlib's "deflate" function has enough logic in it that I think
// we're better off (performance-wise) buffering up small writes.
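// For instance, with MIN_DIRECT_DEFLATE_SIZE at 32, a 100-byte write goes
// straight to flushToZlib below, while a 10-byte write sits in uwbuf_.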
- if ((int)len > MIN_DIRECT_DEFLATE_SIZE) {
+ if (len > MIN_DIRECT_DEFLATE_SIZE) {
flushToZlib(uwbuf_, uwpos_, Z_NO_FLUSH);
uwpos_ = 0;
flushToZlib(buf, len, Z_NO_FLUSH);
} else if (len > 0) {
- if (uwbuf_size_ - uwpos_ < (int)len) {
+ if (uwbuf_size_ - uwpos_ < len) {
flushToZlib(uwbuf_, uwpos_, Z_NO_FLUSH);
uwpos_ = 0;
}
protected:
// Writes of this size or smaller are buffered up.
// Larger writes are dumped straight to zlib.
- static const int MIN_DIRECT_DEFLATE_SIZE = 32;
+ static const uint32_t MIN_DIRECT_DEFLATE_SIZE = 32;
boost::shared_ptr<TTransport> transport_;
/// True iff we have finished the output stream.
bool output_finished_;
- int urbuf_size_;
- int crbuf_size_;
- int uwbuf_size_;
- int cwbuf_size_;
+ uint32_t urbuf_size_;
+ uint32_t crbuf_size_;
+ uint32_t uwbuf_size_;
+ uint32_t cwbuf_size_;
uint8_t* urbuf_;
uint8_t* crbuf_;
// Repeatability. Kind of.
std::srand(42);
- for (int i = 0; i < (int)(sizeof(data)/sizeof(data[0])); ++i) {
+ for (size_t i = 0; i < (sizeof(data)/sizeof(data[0])); ++i) {
data[i] = (uint8_t)rand();
}
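// sizeof(data)/sizeof(data[0]) has type size_t, so an int index makes the
// loop comparison signed-vs-unsigned; a size_t index avoids that warning.
// The same reasoning applies to the sizes/probs loops below.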
1<<14, 1<<17,
};
- for (int i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
+ for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
int size = sizes[i];
for (int d1 = 0; d1 < 3; d1++) {
shared_ptr<TMemoryBuffer> buffer(new TMemoryBuffer(16));
1<<14, 1<<17,
};
- for (int i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
+ for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
int size = sizes[i];
for (int d1 = 0; d1 < 3; d1++) {
shared_ptr<TMemoryBuffer> buffer(new TMemoryBuffer(data, sizeof(data)));
1<<14, 1<<17,
};
- for (int i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
+ for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
int size = sizes[i];
for (int d1 = 0; d1 < 3; d1++) {
shared_ptr<TMemoryBuffer> buffer(new TMemoryBuffer(data, sizeof(data)));
1<<14, 1<<17,
};
- for (int i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
+ for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
int size = sizes[i];
for (int d1 = 0; d1 < 3; d1++) {
shared_ptr<TMemoryBuffer> buffer(new TMemoryBuffer(16));
int probs[] = { 1, 2, 4, 8, 16, 32, };
- for (int i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
+ for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
int size = sizes[i];
- for (int j = 0; j < sizeof (probs) / sizeof (probs[0]); j++) {
+ for (size_t j = 0; j < sizeof (probs) / sizeof (probs[0]); j++) {
int prob = probs[j];
for (int d1 = 0; d1 < 3; d1++) {
shared_ptr<TMemoryBuffer> buffer(new TMemoryBuffer(16));
int read_offset = 0;
int read_index = 0;
- for (int k = 0; k < flush_sizes.size(); k++) {
+ for (unsigned int k = 0; k < flush_sizes.size(); k++) {
int fsize = flush_sizes[k];
// We are exploiting an implementation detail of TFramedTransport.
// The read buffer starts empty and it will never do more than one
const FsyncLog::CallList* calls = log.getCalls();
// We added 1 fsync call above.
// Make sure TFileTransport called fsync at least once, i.e. that the log
// contains more than just our manual call.
- BOOST_CHECK_GT(calls->size(), 1);
+ BOOST_CHECK_GT(calls->size(),
+ static_cast<FsyncLog::CallList::size_type>(1));
const struct timeval* prev_time = NULL;
for (FsyncLog::CallList::const_iterator it = calls->begin();
}
void parse_args(int argc, char* argv[]) {
- int seed;
- int *seedptr = NULL;
-
struct option long_opts[] = {
{ "help", false, NULL, 'h' },
{ "tmp-dir", true, NULL, 't' },
transports.out->flush();
set_trigger(3, transports.out, 1);
uint32_t bytes_read = transports.in->read(read_buf, 10);
- BOOST_CHECK_EQUAL(numTriggersFired, 0);
- BOOST_CHECK_EQUAL(bytes_read, 9);
+ BOOST_CHECK_EQUAL(numTriggersFired, (unsigned int) 0);
+ BOOST_CHECK_EQUAL(bytes_read, (uint32_t) 9);
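// The literals are cast because BOOST_CHECK_EQUAL compares its arguments
// directly, and an int literal against an unsigned value can trip
// -Wsign-compare on some compilers.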
clear_triggers();
}
// Now read 4 bytes, so that we are partway through the written data.
uint32_t bytes_read = transports.in->read(read_buf, 4);
- BOOST_CHECK_EQUAL(bytes_read, 4);
+ BOOST_CHECK_EQUAL(bytes_read, (uint32_t) 4);
// Now attempt to read 10 bytes. Only 9 more are available.
//
while (total_read < 9) {
set_trigger(3, transports.out, 1);
bytes_read = transports.in->read(read_buf, 10);
- BOOST_REQUIRE_EQUAL(numTriggersFired, 0);
- BOOST_REQUIRE_GT(bytes_read, 0);
+ BOOST_REQUIRE_EQUAL(numTriggersFired, (unsigned int) 0);
+ BOOST_REQUIRE_GT(bytes_read, (uint32_t) 0);
total_read += bytes_read;
- BOOST_REQUIRE_LE(total_read, 9);
+ BOOST_REQUIRE_LE(total_read, (uint32_t) 9);
}
- BOOST_CHECK_EQUAL(total_read, 9);
+ BOOST_CHECK_EQUAL(total_read, (uint32_t) 9);
clear_triggers();
}
set_trigger(3, transports.out, 1);
uint32_t borrow_len = 10;
const uint8_t* borrowed_buf = transports.in->borrow(read_buf, &borrow_len);
- BOOST_CHECK_EQUAL(numTriggersFired, 0);
+ BOOST_CHECK_EQUAL(numTriggersFired, (unsigned int) 0);
BOOST_CHECK(borrowed_buf == NULL);
clear_triggers();
add_trigger(1, transports.out, 8);
uint32_t bytes_read = transports.in->read(read_buf, 10);
if (bytes_read == 0) {
- BOOST_CHECK_EQUAL(numTriggersFired, 0);
+ BOOST_CHECK_EQUAL(numTriggersFired, (unsigned int) 0);
clear_triggers();
} else {
- BOOST_CHECK_EQUAL(numTriggersFired, 1);
- BOOST_CHECK_EQUAL(bytes_read, 2);
+ BOOST_CHECK_EQUAL(numTriggersFired, (unsigned int) 1);
+ BOOST_CHECK_EQUAL(bytes_read, (uint32_t) 2);
}
clear_triggers();
uint32_t borrow_len = 10;
const uint8_t* borrowed_buf = transports.in->borrow(NULL, &borrow_len);
BOOST_CHECK(borrowed_buf == NULL);
- BOOST_CHECK_EQUAL(numTriggersFired, 0);
+ BOOST_CHECK_EQUAL(numTriggersFired, (unsigned int) 0);
clear_triggers();
}
}
uint32_t got = zlib_trans->read(mirror.get() + tot, read_len);
BOOST_REQUIRE_LE(got, expected_read_len);
- BOOST_REQUIRE_NE(got, 0);
+ BOOST_REQUIRE_NE(got, (uint32_t) 0);
tot += got;
}
TZlibTransport w_zlib_trans(membuf);
}
- BOOST_CHECK_EQUAL(membuf->available_read(), 0);
+ BOOST_CHECK_EQUAL(membuf->available_read(), (uint32_t) 0);
}
/*